| text (string) | id (string) | metadata (dict) | __index_level_0__ (int64) |
|---|---|---|---|
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a threadblock-scoped GEMV kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix-vector product using SIMT math instructions.
template <
class Core_ //< GemvCore
>
class Gemv {
public:
using Shape = typename Core_::Shape;
/// The MMA operator that computes GEMV
using Operator = typename Core_::Operator;
/// Iterates over A in global memory
using IteratorA = typename Core_::IteratorA;
/// Iterates over B in global memory
using IteratorB = typename Core_::IteratorB;
/// Iterates over C in global memory
using IteratorC = typename Core_::IteratorC;
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of operand accumulator loaded/stored to global memory
using FragmentC = typename Operator::FragmentC;
/// Shape of the per-thread GEMV operation
using ThreadShape = typename Core_::ThreadShape;
public:
CUTLASS_DEVICE
Gemv() { }
CUTLASS_DEVICE
void operator()(
GemmCoord const &problem_size, ///< problem size of batched GEMV
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const &src_accum) { ///< source accumulator tile
//
// Prologue
//
FragmentA frag_A;
FragmentB frag_B;
frag_A.clear();
frag_B.clear();
iterator_A.load(frag_A);
iterator_B.load(frag_B);
++iterator_A;
++iterator_B;
//
// Mainloop
//
Operator thread_mma;
int gemm_k = problem_size.k();
if (gemm_k < Shape::kK)
{
iterator_A.clear_mask();
iterator_B.clear_mask();
}
// iterate over K to accumulate result
CUTLASS_GEMM_LOOP
for (; gemm_k > 0; gemm_k -= Shape::kK) {
thread_mma(accum, frag_A, frag_B, accum);
iterator_A.load(frag_A);
iterator_B.load(frag_B);
++iterator_A;
++iterator_B;
if (gemm_k < Shape::kK)
{
iterator_A.clear_mask();
iterator_B.clear_mask();
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
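// Illustrative sketch (not part of CUTLASS): the predicated, tiled mainloop pattern that
// Gemv::operator() above expresses with iterators and fragments, restated on plain arrays
// so the control flow is easy to follow. TILE_K stands in for Shape::kK, and the bounds
// check plays the role of clear_mask(); all names here are hypothetical.
inline void gemv_mainloop_sketch(float const *A, float const *x, float *y,
                                 int M, int K, int TILE_K) {
  for (int m = 0; m < M; ++m) {
    float accum = 0.0f;
    for (int k0 = 0; k0 < K; k0 += TILE_K) {
      // Residue handling: when fewer than TILE_K iterations remain, predicate off the
      // out-of-bounds loads (the clear_mask() calls above serve this purpose).
      int valid = (K - k0 < TILE_K) ? (K - k0) : TILE_K;
      for (int k = 0; k < valid; ++k) {
        accum += A[m * K + k0 + k] * x[k0 + k];
      }
    }
    y[m] = accum;
  }
}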
| include/cutlass/gemm/threadblock/gemv.h/0 | {"file_path": "include/cutlass/gemm/threadblock/gemv.h", "repo_id": "include", "token_count": 1482} | 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implements the Stream-K threadblock mapping of blockIdx to GEMM problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm_enumerated_types.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/gemm/threadblock/index_remat.h"
#if !defined(__CUDACC_RTC__)
#include <iostream>
#include "cutlass/core_io.h"
#include "cutlass/trace.h"
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock mapping control for GEMMs
struct ThreadblockSwizzleStreamK {
/// Advertise StreamkFeature
using StreamkFeature = void;
/// Kernel traits
template <typename GemmKernel>
struct KernelTraits {};
/// Reduction strategy
enum ReductionStrategy
{
kNone, // Data-parallel strategy (no seams, fixup, etc.)
kAtomic, // Non-deterministic reduction of SK-block partials using atomic aggregation in L2
kMixed, // Deterministic reduction of SK-block partials employing either:
// (a) A separate wave of reduction thread blocks (for scenarios with lots of
// SK-blocks per SK-tile)
// (b) Turnstile-ordered atomic aggregation in L2 (for scenarios with few
// SK-blocks per SK-tile)
};
static ReductionStrategy const kReductionStrategy = kMixed;
//
// Heuristics
//
/// Data-parallel wave-quantization efficiency threshold (above which we go data-parallel)
static float constexpr kDpEfficiencyThreshold = 0.92f;
/// Minimum number of MAC-iterations per streamk block
static int const kMinItersPerSkBlock = 2;
/// Height in CTAs of a grid rasterization cohort
static int const kCohortCtasM = 8;
/// Width in CTAs of a grid rasterization cohort
static int const kCohortCtasN = 4;
/// Number of CTAs per cohort
static int const kCtasPerCohort = kCohortCtasN * kCohortCtasM;
/// Cost-equivalent number of SM-iterations for fixup I/O
static int const kFixupStartupIterEquiv = 10;
static int const kFixupPeerIterEquiv = 3;
//
// Member state
//
/// The 3D value-extents of the GEMM computation volume (m,n,k)
GemmCoord problem_size;
/// Div/mod accelerators
FastDivmod div_mod_tiled_shape_m;
FastDivmod div_mod_tiled_shape_n;
FastDivmod div_mod_tiled_cohort_shape_n;
FastDivmod div_mod_iters_per_tile;
/// Whether to perform cohort CTA rasterization
bool cohort_raster;
// Whether to pad and remap block indices
bool remap_block_indices;
/// CTA occupancy per SM
int sm_occupancy;
/// Number of SMs for dispatch heuristics to load-balance using Stream-K CTAs (wave size)
int avail_sms;
int dp_blocks; /// Number of data-parallel thread blocks in the grid
int dp_first_wave_tiles; /// Number of output tiles each CTA in the first DP wave will produce
/// Number of reduction blocks in the grid
int reduction_blocks;
int sk_waves;
int sk_tiles;
int sk_big_blocks_per_region;
int sk_iters_per_region;
/// Div/mod accelerators
FastDivmod div_mod_sk_iters_per_normal_block;
FastDivmod div_mod_sk_iters_per_big_block;
FastDivmod div_mod_sk_iters_per_region;
FastDivmod div_mod_sk_regions; //!! used in block map
FastDivmod div_mod_sk_blocks_per_region; //!! used in block map
/// The batch count
int batch_count;
//
// Host+device interface
//
/// Constructor
ThreadblockSwizzleStreamK() = default;
/// Returns the GEMM volume in thread block tiles
CUTLASS_HOST_DEVICE
GemmCoord tiled_shape() const
{
return GemmCoord(
static_cast<int>(div_mod_tiled_shape_m),
static_cast<int>(div_mod_tiled_shape_n),
batch_count);
}
/// Number of iterations per output tile
CUTLASS_HOST_DEVICE
int iters_per_tile() const
{
return static_cast<int>(div_mod_iters_per_tile);
}
/// Number of iterations for normal SK-blocks
CUTLASS_HOST_DEVICE
int sk_iters_per_normal_block() const
{
return static_cast<int>(div_mod_sk_iters_per_normal_block);
}
/// Number of SK regions
CUTLASS_HOST_DEVICE
int sk_regions() const
{
return static_cast<int>(div_mod_sk_regions);
}
/// Number of SK blocks per region (splitting factor)
CUTLASS_HOST_DEVICE
int sk_blocks_per_region() const
{
return static_cast<int>(div_mod_sk_blocks_per_region);
}
//
// Host-side interface
//
/// Debug print
void Print()
{
#ifndef __CUDA_ARCH__
auto tiles = tiled_shape().mn().product();
std::cout <<
"problem_size: (" << problem_size.m() << "," << problem_size.n() << ")" <<
", tiled_shape: (" << tiled_shape().m() << "," << tiled_shape().n() << ")" <<
", tiles: " << tiles <<
", dp_tiles: " << tiles - sk_tiles <<
", sk_tiles: " << sk_tiles <<
", iters_per_tile: " << iters_per_tile() <<
", reduction_blocks: " << reduction_blocks <<
", dp_blocks: " << dp_blocks <<
", dp_waves: " << dp_blocks / avail_sms <<
", dp_first_wave_tiles: " << dp_first_wave_tiles <<
", sk_blocks_per_region: " << sk_blocks_per_region() <<
", sk_regions: " << sk_regions() <<
", sk_waves: " << sk_waves <<
", sk_iters_per_normal_block: " << sk_iters_per_normal_block() <<
", sk_big_blocks_per_region: " << sk_big_blocks_per_region <<
", remap_block_indices: " << remap_block_indices <<
", cohort_raster: " << cohort_raster <<
", sm_occupancy: " << sm_occupancy <<
", avail_sms: " << avail_sms <<
", num_blocks: " << get_num_blocks() <<
"\n\n";
#endif
}
// Compute sk_blocks to dispatch for a given number of sk_tiles
static void get_sk_blocks(
int &sk_blocks, /// [out]
int &savings_iters, /// [out]
int sk_tiles,
int iters_per_tile,
int avail_sms,
int max_sk_occupancy,
bool allow_partial_wave)
{
savings_iters = INT_MIN;
sk_blocks = 0;
if (sk_tiles == 0) {
return;
}
int sk_iters = sk_tiles * iters_per_tile;
int dp_equiv_waves = (sk_tiles + avail_sms - 1) / avail_sms;
int dp_equiv_iters = iters_per_tile * dp_equiv_waves;
int min_sk_blocks = (allow_partial_wave) ? fast_min(avail_sms, sk_tiles + 1) : avail_sms;
int max_sk_blocks = fast_min(avail_sms * max_sk_occupancy, sk_iters / kMinItersPerSkBlock);
for (int trial_sk_blocks = min_sk_blocks; trial_sk_blocks <= max_sk_blocks; ++trial_sk_blocks)
{
int sk_waves = (trial_sk_blocks + avail_sms - 1) / avail_sms;
int max_sk_iters_per_block = (sk_iters + trial_sk_blocks - 1) / trial_sk_blocks;
int sk_iter_equiv = max_sk_iters_per_block * sk_waves;
int num_peers = ((trial_sk_blocks + sk_tiles - 1) / sk_tiles) + 1; // add one for alignment skew
float iter_cost = 0.02f * float(num_peers) * float(sk_iter_equiv);
if (trial_sk_blocks % sk_tiles == 0)
{
// aligned
num_peers = (trial_sk_blocks / sk_tiles);
iter_cost = 0.0f;
}
float peer_cost = 2.0f * float(num_peers);
float base_cost = 2.0f * float(sk_waves);
int fixup_iter_equiv = int(base_cost + iter_cost + peer_cost);
int trial_savings_iters = dp_equiv_iters - sk_iter_equiv - fixup_iter_equiv;
if (trial_savings_iters >= savings_iters) {
savings_iters = trial_savings_iters;
sk_blocks = trial_sk_blocks;
}
}
}
/// Determine the populations of DP and SK blocks to invoke for the given number of output tiles
static void get_blocks(
int &dp_tiles, /// [out]
int &sk_blocks, /// [out]
int output_tiles,
int iters_per_tile,
int avail_sms,
int sm_occupancy)
{
int full_waves = output_tiles / avail_sms;
int full_wave_tiles = full_waves * avail_sms;
int partial_wave_tiles = output_tiles - full_wave_tiles;
int score = -1;
dp_tiles = output_tiles;
sk_blocks = 0;
if (partial_wave_tiles == 0)
{
// Perfect quantization
return;
}
if (full_waves < sm_occupancy)
{
// We're less than full GPU occupancy
// Form the SK wave from the partial wave to get us up to full GPU occupancy
int max_sk_occupancy = sm_occupancy - full_waves;
dp_tiles = full_wave_tiles;
get_sk_blocks(
sk_blocks,
score,
partial_wave_tiles,
iters_per_tile,
avail_sms,
max_sk_occupancy,
true); // we can run with less than a full wave of SK-blocks
if (score < 0) {
// not profitable
sk_blocks = 0;
dp_tiles = output_tiles;
}
return;
}
// We're at (or greater than) full GPU occupancy
if ((sm_occupancy > 1 ) && (full_waves % sm_occupancy == sm_occupancy - 1))
{
// If occupancy is more than one CTA per SM, form the SK wave from the partial
// wave to get us to full GPU occupancy
int max_sk_occupancy = 1;
dp_tiles = full_wave_tiles;
get_sk_blocks(
sk_blocks,
score,
partial_wave_tiles,
iters_per_tile,
avail_sms,
max_sk_occupancy,
true); // we can run with less than a full wave of SK-blocks
if (score >= 0) {
return;
}
}
// Form the SK wave by combining the last full wave and the partial wave
dp_tiles = full_wave_tiles - avail_sms;
int max_sk_occupancy = sm_occupancy - ((full_waves - 1) % sm_occupancy);
get_sk_blocks(
sk_blocks,
score,
partial_wave_tiles + avail_sms,
iters_per_tile,
avail_sms,
max_sk_occupancy,
false); // we cannot run with less than a full wave of SK-blocks
if (score < 0) {
// not profitable
sk_blocks = 0;
dp_tiles = output_tiles;
}
}
/// Constructor: *Gemm* problem size (m, n, k)
ThreadblockSwizzleStreamK(
GemmUniversalMode const mode_,
GemmCoord const problem_size_,
GemmCoord const tile_size_,
int const batch_split_, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K)
int const sm_occupancy_,
int const device_sms_,
int const avail_sms_, /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
size_t const element_A_bytes_,
size_t const element_B_bytes_,
size_t const element_C_bytes_,
int const epilogue_acc_fragments_)
:
problem_size(problem_size_),
batch_count((mode_ == GemmUniversalMode::kBatched || mode_ == GemmUniversalMode::kArray) ? batch_split_ : 1),
reduction_blocks(0),
dp_blocks(0),
dp_first_wave_tiles(1), // Default: one tile per DP-block in the first wave of DP blocks
sk_tiles(0),
sk_big_blocks_per_region(0),
sk_iters_per_region(0),
sk_waves(0),
sm_occupancy(sm_occupancy_),
remap_block_indices(false),
avail_sms(fast_max(1, avail_sms_)),
cohort_raster(false)
{
int gpu_occupancy = device_sms_ * sm_occupancy;
int iters_per_tile = (problem_size.k() + tile_size_.k() - 1) / tile_size_.k();
int sk_iters_per_normal_block = 0;
int sk_regions = 1; // Default: a single region of iteration space (across all SK tiles)
int sk_blocks_per_region = 0;
GemmCoord tiled_shape(
(problem_size.m() + tile_size_.m() - 1) / tile_size_.m(),
(problem_size.n() + tile_size_.n() - 1) / tile_size_.n(),
batch_count);
size_t problem_bytes =
(element_C_bytes_ * problem_size.m() * problem_size.n()) +
(element_A_bytes_ * problem_size.m() * problem_size.k()) +
(element_B_bytes_ * problem_size.k() * problem_size.n());
size_t problem_flops = size_t(problem_size.m()) * size_t(problem_size.n()) * size_t(problem_size.k()) * 2;
[[maybe_unused]] float flops_per_byte = float(problem_flops) / float(problem_bytes);
int output_tiles = tiled_shape.m() * tiled_shape.n();
int waves = (output_tiles + avail_sms - 1) / avail_sms;
[[maybe_unused]] float dp_efficiency = float(output_tiles) / float(waves * avail_sms);
//
// Determine dispatch composition of DP-tiles and SK-blocks
//
// Start with a DP-only configuration
int dp_tiles = output_tiles; // Number of data-parallel tiles
int sk_blocks = 0; // Number of thread blocks to produce the remaining SK tiles
// Only kGemm mode allows for SK load balancing
if (mode_ == GemmUniversalMode::kGemm)
{
int split_factor = batch_split_;
if (split_factor > 1)
{
// Split-K override
dp_tiles = 0;
sk_blocks = output_tiles * split_factor;
}
else if ((kReductionStrategy != kNone) && // Load-balancing strategy statically enabled
(avail_sms > 1)) // Plurality of SMs to load balance across
{
// Use heuristics
get_blocks(
dp_tiles, /// [out]
sk_blocks, /// [out]
output_tiles,
iters_per_tile,
avail_sms,
sm_occupancy);
}
}
sk_tiles = output_tiles - dp_tiles;
// Compute SK block iteration details
if (sk_blocks > 0)
{
sk_waves = (sk_blocks + avail_sms - 1) / avail_sms;
int sk_iters = sk_tiles * iters_per_tile;
sk_blocks = fast_min(sk_blocks, sk_iters);
sk_iters_per_normal_block = sk_iters / sk_blocks;
int extra_sk_iters = sk_iters - (sk_iters_per_normal_block * sk_blocks);
int sk_big_blocks = extra_sk_iters;
if ((sk_blocks > sk_tiles) && (sk_blocks % sk_tiles == 0))
{
// Split-K decomposition
sk_regions = sk_tiles;
}
sk_blocks_per_region = sk_blocks / sk_regions;
sk_big_blocks_per_region = sk_big_blocks / sk_regions;
sk_iters_per_region = sk_iters / sk_regions;
// Use a separate reduction wave when all of:
// - Non-atomic reduction strategy
// - The number of SK waves won't fully occupy the GPU (otherwise we don't have
// a strong-scaling case for more parallel reduction)
// - More than three peers working on an SK tile. (This occurs when the ratio of
// SK-blocks to SK-tiles > 2, as a single tile may be covered by four SK-blocks,
// e.g.: [partial-block | block | block | partial-block]). With three or
// fewer peers, the two non-finishing SK-blocks are not expected to contend.
if ((kReductionStrategy == kMixed) &&
(sk_waves < sm_occupancy) &&
(sk_blocks > 2 * sk_tiles))
{
// Launch a reduction block for every accumulator fragment in each SK-tile
reduction_blocks = sk_tiles * epilogue_acc_fragments_;
}
// When we have a multi-occupancy kernel and at least two waves of active blocks (where
// at least one wave is SK blocks), we need to (1) dispatch at least four waves, and (2)
// remap the block indices so that we can reliably spread the SK blocks evenly across the
// device's first SM occupancy valence. Also see get_num_blocks() and get_block_idx().
remap_block_indices = (
(sm_occupancy > 1) &&
(device_sms_ == avail_sms) &&
(get_num_active_blocks() > avail_sms * 2));
// Initialize fast div/mod members related to SK
div_mod_sk_iters_per_normal_block = FastDivmod(sk_iters_per_normal_block);
div_mod_sk_iters_per_big_block = FastDivmod(sk_iters_per_normal_block + 1);
div_mod_sk_iters_per_region = FastDivmod(sk_iters_per_region);
div_mod_sk_regions = FastDivmod(sk_regions);
div_mod_sk_blocks_per_region = FastDivmod(sk_blocks_per_region);
}
//
// Compute DP blocks
//
dp_blocks = dp_tiles;
cutlass::gemm::GemmCoord tiled_cohort_shape(
(tiled_shape.m() + kCohortCtasM - 1) / kCohortCtasM,
(tiled_shape.n() + kCohortCtasN - 1) / kCohortCtasN,
tiled_shape.k());
int cohort_blocks = (tiled_cohort_shape.m() * tiled_cohort_shape.n()) * kCtasPerCohort;
float cohort_efficiency = float(dp_blocks) / float(cohort_blocks);
// Check if the SK tiles would be in cohorts that are in-bounds
bool sk_in_range = true;
if (sk_tiles > 0)
{
int last_sk_tile = sk_tiles - 1;
int cohort_tile_idx = last_sk_tile / kCtasPerCohort;
int cohort_grid_m = cohort_tile_idx / tiled_cohort_shape.n();
int cohort_grid_n = (cohort_grid_m > 0) ?
tiled_cohort_shape.n() - 1 :
cohort_tile_idx % tiled_cohort_shape.n();
if ((((cohort_grid_m + 1) * kCohortCtasM) >= tiled_shape.m()) ||
(((cohort_grid_n + 1) * kCohortCtasN) >= tiled_shape.n()))
{
sk_in_range = false;
}
}
// Decide if we're going to be doing cohort raster
if (sk_in_range &&
(dp_blocks >= gpu_occupancy * 2) &&
(cohort_efficiency > 0.85f))
{
cohort_raster = true;
dp_blocks = cohort_blocks;
}
else if (sk_waves > 0)
{
// Update semi-persistence of first DP wave to ensure full grid wavesets
// (Only applies when there's an SK component and we're not doing blocked cohort rasterization)
int dp_tile_waves = (dp_tiles + avail_sms - 1) / avail_sms;
int full_dp_tile_waves = dp_tiles / avail_sms;
int waveset_excess = (sk_waves + dp_tile_waves) % sm_occupancy;
if (dp_first_wave_tiles + waveset_excess <= full_dp_tile_waves)
{
dp_first_wave_tiles += waveset_excess;
dp_blocks -= (waveset_excess * avail_sms);
}
}
// Setup fast-div/mod for device-side usage
div_mod_tiled_shape_m = FastDivmod(tiled_shape.m());
div_mod_tiled_shape_n = FastDivmod(tiled_shape.n());
div_mod_tiled_cohort_shape_n = FastDivmod(tiled_cohort_shape.n());
div_mod_iters_per_tile = FastDivmod(iters_per_tile);
}
/// Number of blocks performing useful work
int get_num_active_blocks() const
{
return (sk_waves * avail_sms) + dp_blocks + reduction_blocks;
}
/// Obtains number of threadblocks per GEMM
int get_num_blocks() const
{
int active_blocks = get_num_active_blocks();
if (remap_block_indices)
{
// Add padding blocks if we are performing remapping in order to dispatch a grid of at least four waves
return fast_max(active_blocks, avail_sms * 4);
}
return active_blocks;
}
/// Obtains grid extents in CTAs
dim3 get_grid_dims() const
{
return dim3(get_num_blocks(), 1, batch_count);
}
//
// Device-side interface
//
/// Obtains number of threadblocks per GEMM
CUTLASS_DEVICE
int device_num_blocks() const
{
return gridDim.x;
}
/// Obtains tile index for the given sk iteration
CUTLASS_DEVICE
int get_sk_tile_idx(int iter) const
{
int tile_idx = div_mod_iters_per_tile.div(iter);
return tile_idx;
}
/// Obtains the batch index
CUTLASS_DEVICE
int get_batch_idx() const
{
return RematerializeBlockIdxZ();
}
/// Obtains the calling threadblock's tiled coordinates for the given tile index
CUTLASS_DEVICE
GemmCoord get_tile_offset(int tile_idx) const
{
int m, n;
// row-major raster
div_mod_tiled_shape_n(m, n, tile_idx);
if (tiled_shape().m() < tiled_shape().n())
{
// column-major raster
div_mod_tiled_shape_m(n, m, tile_idx);
}
if (cohort_raster)
{
// tiled cohort raster
int cohort_tile_idx = tile_idx / kCtasPerCohort;
int cohort_grid_m, cohort_grid_n;
div_mod_tiled_cohort_shape_n(cohort_grid_m, cohort_grid_n, cohort_tile_idx);
int block_idx_cohort = tile_idx % kCtasPerCohort;
int block_cohort_m = block_idx_cohort / kCohortCtasN;
int block_cohort_n = block_idx_cohort % kCohortCtasN;
m = (cohort_grid_m * kCohortCtasM) + block_cohort_m;
n = (cohort_grid_n * kCohortCtasN) + block_cohort_n;
}
return GemmCoord(m, n, get_batch_idx());
}
/// Obtains the calling threadblock's tiled coordinates for the given tile index (row-major rasterization)
CUTLASS_DEVICE
GemmCoord get_tile_offset_row_major(int tile_idx) const
{
// row-major raster
int m, n;
div_mod_tiled_shape_n(m, n, tile_idx);
return GemmCoord(m, n, get_batch_idx());
}
/// Obtains calling threadblock's linear threadblock index
CUTLASS_DEVICE
int get_block_idx() const
{
int block_idx = RematerializeBlockIdxX();
// Remap the block indices for the first two waves of thread blocks if
// we have multi-occupancy and the grid constitutes four or more waves
if (remap_block_indices && (block_idx < avail_sms * 2))
{
int dest_sm = block_idx / 2;
int dest_wave = block_idx % 2;
int remapped_block_idx = dest_sm + (dest_wave * avail_sms);
block_idx = remapped_block_idx;
}
// Remap block indices to interleave SK regions to limit intra-region waiting
if (block_idx < sk_regions() * sk_blocks_per_region())
{
int block_in_region;
int region;
div_mod_sk_regions(block_in_region, region, block_idx);
block_idx = (region * sk_blocks_per_region()) + block_in_region;
}
return block_idx;
}
/// Obtains the linear SK-block index that owns the given global iteration
CUTLASS_DEVICE
int get_sk_block_idx(int iter) const
{
int region_idx;
int iter_in_region;
div_mod_sk_iters_per_region(region_idx, iter_in_region, iter);
int big_block_iters = (sk_big_blocks_per_region * sk_iters_per_normal_block()) + sk_big_blocks_per_region; // number of iterations in the region's big blocks
int normal_block_iters = iter_in_region - big_block_iters; // iteration offset within the region's normal blocks
int big_block_idx_in_region = div_mod_sk_iters_per_big_block.div(iter_in_region);
int normal_block_idx_in_region = sk_big_blocks_per_region + div_mod_sk_iters_per_normal_block.div(normal_block_iters);
int block_idx_in_region = (big_block_idx_in_region < sk_big_blocks_per_region) ?
big_block_idx_in_region :
normal_block_idx_in_region;
int owning_block_idx = (sk_blocks_per_region() * region_idx) + block_idx_in_region;
return owning_block_idx;
}
/// Obtains iteration extents for the given SK block index
CUTLASS_DEVICE
void get_iter_extents(
int sk_block_idx,
int &block_iter_begin,
int &block_iter_end) const
{
int region_idx;
int block_idx_in_region;
div_mod_sk_blocks_per_region(region_idx, block_idx_in_region, sk_block_idx);
block_iter_begin = (region_idx * sk_iters_per_region) + (block_idx_in_region * sk_iters_per_normal_block());
// Adjust extents for the first "num_big_blocks" blocks that get one extra iteration
int block_iters = sk_iters_per_normal_block();
if (block_idx_in_region < sk_big_blocks_per_region) {
// This is a +1 iteration block
block_iter_begin += block_idx_in_region;
block_iters++;
} else {
// This is a regular block
block_iter_begin += sk_big_blocks_per_region;
}
block_iter_end = block_iter_begin + block_iters;
}
/// Obtains the linear threadblock index of the first block to work on the given tile
CUTLASS_DEVICE
int get_first_block_idx(int tile_idx, int block_idx) const
{
if (tile_idx >= sk_tiles) {
// DP tile
return block_idx;
}
int iter = tile_idx * iters_per_tile();
return get_sk_block_idx(iter);
}
};
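// Illustrative sketch (not part of CUTLASS): the big-block / normal-block split that the
// constructor and get_iter_extents() above implement with FastDivmod, written out for the
// single-region case on the host. The first `extra` blocks each take one additional
// iteration. The helper name and out-parameters are hypothetical.
inline void sk_iter_extents_sketch(int sk_tiles, int iters_per_tile, int sk_blocks,
                                   int *block_iter_begin, int *block_iter_end) {
  int sk_iters = sk_tiles * iters_per_tile;
  int iters_per_normal_block = sk_iters / sk_blocks;           // floor
  int extra = sk_iters - (iters_per_normal_block * sk_blocks); // number of "big" (+1 iter) blocks
  for (int b = 0; b < sk_blocks; ++b) {
    int begin = b * iters_per_normal_block;
    int iters = iters_per_normal_block;
    if (b < extra) {
      begin += b;      // each preceding big block contributed one extra iteration
      ++iters;
    }
    else {
      begin += extra;
    }
    block_iter_begin[b] = begin;
    block_iter_end[b] = begin + iters;
  }
}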
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
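// Illustrative sketch (not part of CUTLASS): the two remapping steps performed by
// ThreadblockSwizzleStreamK::get_block_idx() above, restated as a host function. The first
// step spreads the first two waves across the device's first SM occupancy valence; the
// second interleaves SK regions to limit intra-region waiting. All parameters are
// hypothetical stand-ins for the member state used on the device.
inline int get_block_idx_sketch(int block_idx, bool remap_block_indices, int avail_sms,
                                int sk_regions, int sk_blocks_per_region) {
  if (remap_block_indices && (block_idx < avail_sms * 2)) {
    int dest_sm = block_idx / 2;
    int dest_wave = block_idx % 2;
    block_idx = dest_sm + (dest_wave * avail_sms);
  }
  if (block_idx < sk_regions * sk_blocks_per_region) {
    // div_mod_sk_regions(block_in_region, region, block_idx) yields quotient then remainder.
    int block_in_region = block_idx / sk_regions;
    int region = block_idx % sk_regions;
    block_idx = (region * sk_blocks_per_region) + block_in_region;
  }
  return block_idx;
}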
| include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h/0 | {"file_path": "include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h", "repo_id": "include", "token_count": 10734} | 41 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK = 1,
/// Complex transformation on operand A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transformation on operand B
ComplexTransform TransformB = ComplexTransform::kNone,
/// Used for partial specialization
typename Enable = bool
>
class MmaSimt {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassSimt;
/// Hard-coded for now
using ArchTag = arch::Sm50;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Layout of threads
using ThreadLayoutA = typename platform::conditional< platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA >::value,
layout::ColumnMajor,
typename platform::conditional < platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value,
layout::RowMajor,
LayoutA>::type
>::type;
using ThreadLayoutB = typename platform::conditional< platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutB >::value,
layout::ColumnMajor,
typename platform::conditional < platform::is_same< layout::RowMajorInterleaved<4>, LayoutB >::value,
layout::RowMajor,
LayoutB>::type
>::type;
static constexpr bool use_dp4a = (platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA>::value ||
platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value) &&
platform::is_same< ElementA, int8_t >::value &&
platform::is_same< ElementB, int8_t >::value;
using dp4a_type = typename platform::conditional< use_dp4a , int8_t, bool >::type;
/// Thread-level matrix multiply accumulate operator
using ThreadMma = thread::Mma<
GemmShape<
Shape::kM / Policy::WarpShape::kRow,
Shape::kN / Policy::WarpShape::kColumn,
Policy::LaneMmaShape::kK>,
ElementA,
ThreadLayoutA,
ElementB,
ThreadLayoutB,
ElementC,
LayoutC,
arch::OpMultiplyAdd,
dp4a_type
>;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename ThreadMma::ArchMmaOperator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Shape of the underlying instruction
using InstructionShape = GemmShape<1,1,use_dp4a ? 4 : 1>;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaSimtTileIterator<
MatrixShape<Shape::kM, Policy::LaneMmaShape::kK>,
Operand::kA,
ElementA,
LayoutA,
Policy,
PartitionsK,
Shape::kK
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaSimtTileIterator<
MatrixShape<Policy::LaneMmaShape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
Policy,
PartitionsK,
Shape::kK
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
/// Iterates over the C operand in memory
using IteratorC = MmaSimtTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
Operand::kC,
ElementC,
LayoutC,
Policy
>;
/// Storage for C tile
using FragmentC = typename ThreadMma::FragmentC;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaSimt() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &d,
FragmentA a,
FragmentB b,
FragmentC const &c, int group_idx = 0) const {
ThreadMma mma;
if (kTransformA == ComplexTransform::kConjugate) {
conjugate<FragmentA> conj_a;
a = conj_a(a);
}
if (kTransformB == ComplexTransform::kConjugate) {
conjugate<FragmentB> conj_b;
b = conj_b(b);
}
mma(d, a, b, c);
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
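// Illustrative sketch (not part of CUTLASS): the operand-transform step that
// MmaSimt::operator() above applies before delegating to the thread-level MMA. A tiny
// complex type and a 1x1x1 "thread MMA" stand in for the real fragments and ThreadMma;
// every name here is a hypothetical simplification.
struct ComplexSketch { float re, im; };

inline ComplexSketch conj_sketch(ComplexSketch v) { return {v.re, -v.im}; }

inline ComplexSketch simt_mma_sketch(ComplexSketch a, ComplexSketch b, ComplexSketch c,
                                     bool conjugate_a, bool conjugate_b) {
  // Mirrors: if (kTransformA == ComplexTransform::kConjugate) { a = conj_a(a); }
  if (conjugate_a) { a = conj_sketch(a); }
  if (conjugate_b) { b = conj_sketch(b); }
  // Stand-in for ThreadMma: d = a * b + c (complex multiply-accumulate).
  ComplexSketch d;
  d.re = a.re * b.re - a.im * b.im + c.re;
  d.im = a.re * b.im + a.im * b.re + c.im;
  return d;
}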
| include/cutlass/gemm/warp/mma_simt.h/0 | {"file_path": "include/cutlass/gemm/warp/mma_simt.h", "repo_id": "include", "token_count": 2843} | 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting Tensor Cores, additionally reducing
/// one operand (A or B) along the K dimension.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Reduce operand A or B along K dimension
bool ReduceKForA_,
/// Number of partitions along K dimension
int PartitionsK_ = 1,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaWithReductionTensorOp {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
static bool const kReduceKForA = ReduceKForA_;
static_assert(platform::is_same<ElementA, cutlass::half_t>::value ||
platform::is_same<ElementA, cutlass::bfloat16_t>::value,
"ElementA needs to be fp16 or bf16.");
static_assert(platform::is_same<ElementB, cutlass::half_t>::value ||
platform::is_same<ElementB, cutlass::bfloat16_t>::value,
"ElementB needs to be fp16 or bf16.");
static_assert(platform::is_same<InstructionShape,
cutlass::gemm::GemmShape<16, 8, 16>>::value,
"Only supports 16x8x16 tensor core instruction.");
static_assert(!AccumulatorsInRowMajor,
"Only calls tensor core instructions in column major.");
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC,
typename ArchMmaOperator::Shape, typename Policy::OpDelta>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM,
(Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN
>;
using FragmentReduction = Array<ElementC, kReduceKForA ? (Shape::kM / 8) : (Shape::kN / 8)>;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaWithReductionTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C,
FragmentReduction &gemm_k_reduction
) const {
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
[[maybe_unused]] MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
[[maybe_unused]] MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
[[maybe_unused]] MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)
assert(0);
#elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
// Serpentine visitation order maximizing reuse of Ra
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n);
mma(ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[m + n_serpentine * MmaIterations::kRow]);
if (!kReduceKForA && m == 0) {
#if 0
gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4]);
gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 1]);
gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 2]);
gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 3]);
#else
uint32_t const *tmp = reinterpret_cast<uint32_t const *>(&B);
if (platform::is_same<ElementB, cutlass::half_t>::value) {
asm volatile(
"{\n\t"
" .reg .f16 low, high;\n\t"
" .reg .f32 tmp;\n\t"
" mov.b32 {low, high}, %1;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" mov.b32 {low, high}, %2;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %0, tmp, %0;\n\t"
"}\n\t"
: "+f"(gemm_k_reduction[n_serpentine])
: "r"(tmp[n_serpentine * 2]), "r"(tmp[n_serpentine * 2 + 1]));
} else if (platform::is_same<ElementB, cutlass::bfloat16_t>::value) {
asm volatile(
"{\n\t"
" .reg .f32 tmp;\n\t"
" shl.b32 tmp, %1, 16;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" and.b32 tmp, %1, 0xffff0000;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" shl.b32 tmp, %2, 16;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" and.b32 tmp, %2, 0xffff0000;\n\t"
" add.f32 %0, tmp, %0;\n\t"
"}\n\t"
: "+f"(gemm_k_reduction[n_serpentine])
: "r"(tmp[n_serpentine * 2]), "r"(tmp[n_serpentine * 2 + 1]));
} else {
assert(0);
}
#endif
}
if (kReduceKForA && (n == 0)) {
#if 0
gemm_k_reduction[m * 2] += float(A[m * 8]);
gemm_k_reduction[m * 2] += float(A[m * 8 + 1]);
gemm_k_reduction[m * 2] += float(A[m * 8 + 4]);
gemm_k_reduction[m * 2] += float(A[m * 8 + 5]);
gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 2]);
gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 3]);
gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 6]);
gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 7]);
#else
uint32_t const *tmp = reinterpret_cast<uint32_t const *>(&A);
if (platform::is_same<ElementA, cutlass::half_t>::value) {
asm volatile(
"{\n\t"
" .reg .f16 low, high;\n\t"
" .reg .f32 tmp;\n\t"
" mov.b32 {low, high}, %2;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" mov.b32 {low, high}, %3;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" mov.b32 {low, high}, %4;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" mov.b32 {low, high}, %5;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %1, tmp, %1;\n\t"
"}\n\t"
: "+f"(gemm_k_reduction[m * 2]), "+f"(gemm_k_reduction[m * 2 + 1])
: "r"(tmp[m * 4]), "r"(tmp[m * 4 + 1]),"r"(tmp[m * 4 + 2]), "r"(tmp[m * 4 + 3]));
} else if (platform::is_same<ElementA, cutlass::bfloat16_t>::value) {
asm volatile(
"{\n\t"
" .reg .f32 tmp;\n\t"
" shl.b32 tmp, %2, 16;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" and.b32 tmp, %2, 0xffff0000;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" shl.b32 tmp, %3, 16;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" and.b32 tmp, %3, 0xffff0000;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" shl.b32 tmp, %4, 16;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" and.b32 tmp, %4, 0xffff0000;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" shl.b32 tmp, %5, 16;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" and.b32 tmp, %5, 0xffff0000;\n\t"
" add.f32 %1, tmp, %1;\n\t"
"}\n\t"
: "+f"(gemm_k_reduction[m * 2]), "+f"(gemm_k_reduction[m * 2 + 1])
: "r"(tmp[m * 4]), "r"(tmp[m * 4 + 1]),"r"(tmp[m * 4 + 2]), "r"(tmp[m * 4 + 3]));
} else {
assert(0);
}
#endif
}
}
}
#else
assert(0);
#endif
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
//
// Define conversions from source type to instruction type
//
FloatRoundStyle const kRoundA =
PreferredRoundingMode<typename ArchMmaOperator::ElementA,
ElementA>::kRound;
FloatRoundStyle const kRoundB =
PreferredRoundingMode<typename ArchMmaOperator::ElementB,
ElementB>::kRound;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)
detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA,
FragmentA::kElements, kRoundA>
convert_A;
NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB,
FragmentB::kElements / 2, kRoundB>
convert_B;
Array<ElementB, FragmentB::kElements / 2> const *ptr_B =
reinterpret_cast<Array<ElementB, FragmentB::kElements / 2> const *>(&B);
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements / 2> *
ptr_dst_B = reinterpret_cast<Array<typename ArchMmaOperator::ElementB,
FragmentB::kElements / 2> *>(&dst_B);
dst_A = convert_A(A);
ptr_dst_B[0] = convert_B(ptr_B[0]);
ptr_dst_B[1] = convert_B(ptr_B[1]);
#elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA,
FragmentA::kElements / 2, kRoundA>
convert_A;
NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB,
FragmentB::kElements, kRoundB>
convert_B;
Array<ElementA, FragmentA::kElements / 2> const *ptr_A =
reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> const *>(&A);
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *
ptr_dst_A = reinterpret_cast<Array<typename ArchMmaOperator::ElementA,
FragmentA::kElements / 2> *>(&dst_A);
dst_B = convert_B(B);
ptr_dst_A[0] = convert_A(ptr_A[0]);
ptr_dst_A[1] = convert_A(ptr_A[1]);
#else
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
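// Illustrative sketch (not part of CUTLASS): the serpentine visitation order and the running
// K-reduction that MmaWithReductionTensorOp::operator() above maintains alongside the
// tensor-core MMAs, restated on plain row-major float tiles (A: MxK, B: KxN). The PTX
// cvt/add sequences become ordinary float adds, and k_reduction is assumed to be
// zero-initialized by the caller. All names are hypothetical.
inline void mma_with_reduction_sketch(float *D, float const *A, float const *B,
                                      float const *C, float *k_reduction,
                                      int M, int N, int K, bool reduce_k_for_a) {
  for (int i = 0; i < M * N; ++i) {
    D[i] = C[i];
  }
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      // Serpentine: odd rows visit columns right-to-left, maximizing reuse of the A fragment.
      int ns = (m % 2) ? (N - 1 - n) : n;
      for (int k = 0; k < K; ++k) {
        D[m * N + ns] += A[m * K + k] * B[k * N + ns];
      }
      // Reduce rows of A along K once per row (n == 0), or columns of B along K once per
      // column (m == 0), matching the kReduceKForA branches above.
      if (reduce_k_for_a && n == 0) {
        for (int k = 0; k < K; ++k) { k_reduction[m] += A[m * K + k]; }
      }
      if (!reduce_k_for_a && m == 0) {
        for (int k = 0; k < K; ++k) { k_reduction[ns] += B[k * N + ns]; }
      }
    }
  }
}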
| include/cutlass/gemm/warp/mma_with_reduction_tensor_op.h/0 | {"file_path": "include/cutlass/gemm/warp/mma_with_reduction_tensor_op.h", "repo_id": "include", "token_count": 7946} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layouts for Volta Tensor Core multiplicand operands in terms of pitch-linear memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/layout/pitch_linear.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace layout {
// template <
// int ElementSize,
// gemm::Operand Operand
// >
// struct VoltaTensorOpMultiplicandCongruous;
// template <
// int ElementSize,
// gemm::Operand Operand
// >
// struct ColumnMajorVoltaTensorOpMultiplicandCongruous;
// template <
// int ElementSize,
// gemm::Operand Operand
// >
// struct RowMajorVoltaTensorOpMultiplicandCongruous;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template based on element size (in bits) - defined in terms of pitch-linear memory.
template <int ElementSize>
struct VoltaTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 128b accesses
static int const kAccessSize = 128;
/// Fundamental tile shape in units of vectors
using TileShape = PitchLinearShape<8, 4>;
/// Fundamental partition shape in units of vectors
using PartitionShape = PitchLinearShape<8, 2>;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
using PartitionCount = PitchLinearShape<
TileShape::kContiguous / PartitionShape::kContiguous,
TileShape::kStrided / PartitionShape::kStrided
>;
using AccessCount = PitchLinearShape<
PartitionShape::kContiguous,
PartitionShape::kStrided
>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCongruous(Index ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCongruous(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static VoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return VoltaTensorOpMultiplicandCongruous(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
// First, compute c and s of vector within source (in units of vector accesses)
int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess;
int vec_strided_idx = coord.strided();
// Compute the fundamental tile being accessed
int tile_contiguous_idx = vec_contiguous_idx / TileShape::kContiguous;
int tile_strided_idx = vec_strided_idx / TileShape::kStrided;
int tile_contiguous_residual = vec_contiguous_idx % TileShape::kContiguous;
int tile_strided_residual = vec_strided_idx % TileShape::kStrided;
// Then swizzle in a tile
// Swizzle pattern is (tid[2:0] << 2)|(tid[4:3] ^ tid[2:1])
int permuted_strided_within_tile = (tile_contiguous_residual >> 1);
int permuted_contiguous_within_tile = (tile_strided_residual ^ permuted_strided_within_tile) |
((tile_contiguous_residual & 1) << 2);
// Compute final element location
int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous +
permuted_contiguous_within_tile) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess);
int element_strided = tile_strided_idx * TileShape::kStrided + permuted_strided_within_tile;
return element_contiguous + element_strided * stride_[0];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
};
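// Illustrative sketch (not part of CUTLASS): the congruous swizzle arithmetic implemented by
// VoltaTensorOpMultiplicandCongruous::operator() above, with the tile constants folded in for
// 16-bit elements (8 elements per 128b access). A standalone, hypothetical helper that is
// convenient for printing the permuted offsets of a small tile on the host.
inline long long swizzled_offset_sketch(int contiguous, int strided, long long stride0) {
  int const kElementsPerAccessSketch = 8;              // 128b / 16b elements (assumed)
  int const kTileContiguousSketch = 8, kTileStridedSketch = 4;
  int vec_c = contiguous / kElementsPerAccessSketch;   // vector coordinates of the access
  int vec_s = strided;
  int tile_c = vec_c / kTileContiguousSketch, tile_s = vec_s / kTileStridedSketch;
  int res_c = vec_c % kTileContiguousSketch, res_s = vec_s % kTileStridedSketch;
  // Swizzle within the tile: (tid[2:0] << 2) | (tid[4:3] ^ tid[2:1])
  int perm_s = res_c >> 1;
  int perm_c = (res_s ^ perm_s) | ((res_c & 1) << 2);
  long long elem_c = (tile_c * kTileContiguousSketch + perm_c) *
                     (long long)kElementsPerAccessSketch +
                     (contiguous % kElementsPerAccessSketch);
  long long elem_s = tile_s * kTileStridedSketch + perm_s;
  return elem_c + elem_s * stride0;
}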
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template mapping a column-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous
template <int ElementSize>
struct ColumnMajorVoltaTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorVoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return ColumnMajorVoltaTensorOpMultiplicandCongruous(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
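// Illustrative sketch (added for exposition; not part of the original header): in this
// column-major adaptor the matrix row index plays the role of the contiguous dimension,
// so MatrixCoord(row, column) is forwarded to the underlying pitch-linear layout as
// (contiguous = row, strided = column). Extent and coordinate values are arbitrary examples.
CUTLASS_HOST_DEVICE
bool example_column_major_congruous_matches_base() {
  ColumnMajorVoltaTensorOpMultiplicandCongruous<16> cm =
      ColumnMajorVoltaTensorOpMultiplicandCongruous<16>::packed(MatrixCoord(128, 64));
  VoltaTensorOpMultiplicandCongruous<16> base =
      VoltaTensorOpMultiplicandCongruous<16>::packed(PitchLinearCoord(128, 64));
  // Equivalent coordinates produce identical swizzled offsets.
  return cm(MatrixCoord(24, 3)) == base(PitchLinearCoord(24, 3));
}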
/// Template mapping a row-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous
template <int ElementSize>
struct RowMajorVoltaTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorVoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return RowMajorVoltaTensorOpMultiplicandCongruous(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
/// Template based on element size (in bits) - defined in terms of pitch-linear memory.
// template <int ElementSize, Operand Operand>
template <int ElementSize>
struct VoltaTensorOpMultiplicandBCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 128b accesses
static int const kAccessSize = 128;
/// Fundamental tile shape in units of vectors
using TileShape = PitchLinearShape<8, 4>;
/// Fundamental partition shape in units of vectors
using PartitionShape = PitchLinearShape<4, 4>;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
using PartitionCount = PitchLinearShape<
TileShape::kContiguous / PartitionShape::kContiguous,
TileShape::kStrided / PartitionShape::kStrided
>;
using AccessCount = PitchLinearShape<
PartitionShape::kContiguous,
PartitionShape::kStrided
>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandBCongruous(Index ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandBCongruous(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static VoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) {
return VoltaTensorOpMultiplicandBCongruous(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
// First, compute c and s of vector within source (in units of vector accesses)
int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess;
int vec_strided_idx = coord.strided();
// Compute the fundamental tile being accessed
int tile_contiguous_idx = vec_contiguous_idx / TileShape::kContiguous;
int tile_strided_idx = vec_strided_idx / TileShape::kStrided;
int tile_contiguous_residual = vec_contiguous_idx % TileShape::kContiguous;
int tile_strided_residual = vec_strided_idx % TileShape::kStrided;
// Then swizzle in a tile
// Swizzle pattern is (tid[1:0] << 3)|(tid & 0x4)|(tid[1:0])
int permuted_strided_within_tile = (tile_contiguous_residual & 0x3);
int permuted_contiguous_within_tile = (tile_strided_residual ^ permuted_strided_within_tile) |
(tile_contiguous_residual & 0x4);
// Compute final element location
int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous +
permuted_contiguous_within_tile) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess);
int element_strided = tile_strided_idx * TileShape::kStrided + permuted_strided_within_tile;
return element_contiguous + element_strided * stride_[0];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
};
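// Illustrative checks (added for exposition; not part of the original header): the
// B-operand congruous layout shares the 8x4-vector tile of the layout above but uses a
// 4x4-vector partition shape and a different swizzle. For 16-bit elements each 128b
// access carries 8 elements and each tile holds a 2x1 grid of partitions.
static_assert(VoltaTensorOpMultiplicandBCongruous<16>::kElementsPerAccess == 8,
              "128b accesses of 16b elements carry 8 elements each");
static_assert(VoltaTensorOpMultiplicandBCongruous<16>::PartitionCount::kContiguous == 2 &&
              VoltaTensorOpMultiplicandBCongruous<16>::PartitionCount::kStrided == 1,
              "TileShape<8,4> divided by PartitionShape<4,4> yields a 2x1 partition count");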
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template mapping a column-major view of pitch-linear memory to VoltaTensorOpMultiplicandBCongruous
template <int ElementSize>
struct ColumnMajorVoltaTensorOpMultiplicandBCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandBCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandBCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandBCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorVoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) {
return ColumnMajorVoltaTensorOpMultiplicandBCongruous(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
/// Template mapping a row-major view of pitch-linear memory to VoltaTensorOpMultiplicandBCongruous
template <int ElementSize>
struct RowMajorVoltaTensorOpMultiplicandBCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandBCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandBCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandBCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorVoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) {
return RowMajorVoltaTensorOpMultiplicandBCongruous(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
/// Template based on element size (in bits) - defined in terms of pitch-linear
/// memory and KBlock size (in elements).
template <int ElementSize, int KBlock>
struct VoltaTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 64b accesses
static int const kAccessSize = 64;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
static int const kKBlock = KBlock;
private:
//
// Data members
//
  /// Stride data member. For GEMM, it equals KBlock x the number of stages.
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : stride_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCrosswise(Stride stride) : stride_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static VoltaTensorOpMultiplicandCrosswise packed(TensorCoord const &extent) {
return VoltaTensorOpMultiplicandCrosswise(extent[1]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
//
// First, compute c and s of vector within source (in units of vector
// accesses)
//
int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess;
int vec_strided_idx = coord.strided();
//
// Then swizzle
// The mapping is like this:
// id[1:0]|(id[3]^id[4])|id[2]
int vec_strided_within_tile = vec_contiguous_idx & 0x7;
int permuted_vec_contiguous =
(vec_strided_idx & (~0xF)) + (vec_strided_idx & 0x3) * 4 +
(((vec_strided_idx >> 2) ^ ((vec_strided_idx & 0x10) >> 3)) & 0x3);
permuted_vec_contiguous ^= ((vec_strided_within_tile >> 1) & 0x3);
int permuted_vec_strided = vec_contiguous_idx;
//
// Compute final element location
//
int element_contiguous = permuted_vec_contiguous * kElementsPerAccess +
(coord.contiguous() % kElementsPerAccess);
return element_contiguous + permuted_vec_strided * (stride_[0] * kElementsPerAccess);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return stride_; }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return stride_; }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[0] * stride_[0];
}
};
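// Illustrative usage sketch (added for exposition; not part of the original header):
// unlike the congruous layouts above, the crosswise layout takes its stride from the
// strided extent (extent[1]), and capacity grows with the contiguous extent. For
// 16-bit elements each 64b access carries 4 elements. Extents are arbitrary examples.
CUTLASS_HOST_DEVICE
int64_t example_volta_crosswise_capacity() {
  using Layout = VoltaTensorOpMultiplicandCrosswise<16, 32>;  // 16-bit elements, KBlock = 32
  Layout layout = Layout::packed(PitchLinearCoord(64, 32));   // stride_[0] = extent[1] = 32
  return layout.capacity(PitchLinearCoord(64, 32));           // extent[0] * stride_[0] = 64 * 32
}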
/// Template mapping a column-major view of pitch-linear memory to
/// VoltaTensorOpMultiplicandCrosswise
template <int ElementSize, int KBlock>
struct ColumnMajorVoltaTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCrosswise<ElementSize, KBlock>;
/// This layout is optimized for 64b accesses
static int const kAccessSize = Base::kAccessSize;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorVoltaTensorOpMultiplicandCrosswise packed(
TensorCoord const &extent) {
return ColumnMajorVoltaTensorOpMultiplicandCrosswise(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return layout_.stride(); }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return layout_.stride(); }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
/// Template mapping a row-major view of pitch-linear memory to
/// VoltaTensorOpMultiplicandCrosswise
template <int ElementSize, int KBlock>
struct RowMajorVoltaTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCrosswise<ElementSize, KBlock>;
/// This layout is optimized for 64b accesses
static int const kAccessSize = Base::kAccessSize;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorVoltaTensorOpMultiplicandCrosswise packed(
TensorCoord const &extent) {
return RowMajorVoltaTensorOpMultiplicandCrosswise(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return layout_.stride(); }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return layout_.stride(); }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
} // namespace layout
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/layout/tensor_op_multiplicand_sm70.h/0 | {
"file_path": "include/cutlass/layout/tensor_op_multiplicand_sm70.h",
"repo_id": "include",
"token_count": 9595
} | 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a structure containing strides, bounds, and a pointer to tensor data.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/platform/platform.h"
#include "cutlass/subbyte_reference.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Default layout function from coordinates in a tensor's index space into the n-D array held
/// in memory.
///
/// All layout functions must define at least the members shown in IdentityTensorLayout<>.
template <int Rank>
class IdentityTensorLayout {
public:
/// Logical rank of tensor
static int const kRank = Rank;
/// Rank of stride vector
static int const kStrideRank = Rank;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Coord<kRank, Index>;
/// Stride vector
using Stride = Coord<kStrideRank, Index>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
CUTLASS_HOST_DEVICE
IdentityTensorLayout(Stride const &stride = Stride()): stride_(stride) { }
/// Returns the offset of a coordinate in linear memory
CUTLASS_HOST_DEVICE
LongIndex operator()(Coord<Rank> const &coord) const {
return coord.dot(stride_);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &size) const {
int idx = stride_.max_dim_index();
return stride_[idx] * size[idx];
}
};
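// Illustrative usage sketch (added for exposition; not part of the original header):
// the identity layout simply forms the dot product of a coordinate with its stride
// vector. With strides {12, 4, 1}, coordinate {2, 1, 3} maps to 2*12 + 1*4 + 3*1 = 31.
CUTLASS_HOST_DEVICE
int64_t example_identity_layout_offset() {
  IdentityTensorLayout<3> layout(make_Coord(12, 4, 1));
  return layout(make_Coord(2, 1, 3));   // 31
}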
///////////////////////////////////////////////////////////////////////////////////////////////////
/* \brief TensorRef is a template for objects pointing to the start of tensors of arbitrary rank
and layout within memory. A TensorRef combines a pointer and a Layout concept
Examples:
(These examples use helpers for matrix layouts defined in cutlass/layout/matrix.h)
1. Column-major matrix may be represented as a rank=2 tensor:
TensorRef<float, layout::ColumnMajor> A(ptr_A, ldm);
2. Row-major matrix may be represented as a rank=2 tensor:
    TensorRef<float, layout::RowMajor> B(ptr_B, ldm);
3. An interleaved matrix may be represented as a rank=2 tensor:
TensorRef<int8_t, layout::ColumnMajorInterleaved<32> > C;
4. A helper exists to define a TensorRef for a contiguous matrix whose layout
is not known at compile time.
int ldm; // leading dimension
layout::Matrix kind; // Could be layout::Matrix::kRowMajor or layout::Matrix::kColumnMajor
TensorRef<int, layout::ContiguousMatrix> E(ptr_E, {ldm, kind});
*/
template <
/// Data type of element stored within tensor (concept: NumericType)
typename Element_,
/// Defines a mapping from logical coordinate to linear memory (concept: Layout)
typename Layout_
>
class TensorRef {
public:
/// Data type of individual access
using Element = Element_;
/// Mapping function from logical coordinate to linear memory
using Layout = Layout_;
/// Reference type to an element
using Reference = typename platform::conditional<
sizeof_bits<Element>::value >= 8,
Element &,
SubbyteReference<Element>
>::type;
/// Logical rank of tensor index space
static int const kRank = Layout::kRank;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Layout's stride vector
using Stride = typename Layout::Stride;
/// TensorRef to constant data
using ConstTensorRef = TensorRef<
typename platform::remove_const<Element>::type const,
Layout>;
/// TensorRef to non-constant data
using NonConstTensorRef = TensorRef<
typename platform::remove_const<Element>::type,
Layout>;
/// Require at least rank=1. Mathematically, a rank=0 tensor would be considered to be a
/// scalar, but degenerate cases such as these are difficult to accommodate without
/// extensive C++ metaprogramming or support for zero-length arrays.
static_assert(kRank > 0, "Cannot define a zero-rank TensorRef");
private:
/// Pointer
Element* ptr_;
/// Layout object maps logical coordinates to linear offsets
Layout layout_;
public:
//
// Methods
//
/// Constructs a TensorRef with a pointer and layout object.
CUTLASS_HOST_DEVICE
TensorRef(): ptr_(nullptr) {
}
/// Constructs a TensorRef with a pointer and layout object.
CUTLASS_HOST_DEVICE
TensorRef(
Element *ptr, ///< pointer to start of tensor
Layout const &layout ///< layout object containing stride and mapping function
):
ptr_(ptr), layout_(layout) {
}
/// Converting constructor from TensorRef to non-constant data.
template<typename _Magic = int>
CUTLASS_HOST_DEVICE
TensorRef(
NonConstTensorRef const &ref, ///< TensorRef to non-const data
///SFINAE trick to avoid creating a copy-constructor when Element_ is already non-const
_Magic magic = (typename platform::enable_if< ! platform::is_same<NonConstTensorRef, TensorRef<Element_, Layout_> >::value, _Magic>::type)0
):
ptr_(ref.data()), layout_(ref.layout()) { }
/// Returns a reference to constant-valued tensor.
CUTLASS_HOST_DEVICE
ConstTensorRef const_ref() const {
return ConstTensorRef(ptr_, layout_);
}
CUTLASS_HOST_DEVICE
NonConstTensorRef non_const_ref() const {
return NonConstTensorRef(const_cast<typename platform::remove_const<Element>::type *>(ptr_), layout_);
}
/// Updates only the pointer
CUTLASS_HOST_DEVICE
void reset(Element* ptr = nullptr) {
ptr_ = ptr;
}
/// Updates the pointer and layout object
CUTLASS_HOST_DEVICE
void reset(Element* ptr, Layout const &layout) {
ptr_ = ptr;
layout_ = layout;
}
/// Returns true if the TensorRef is non-null
CUTLASS_HOST_DEVICE
bool good() const {
return ptr_ != nullptr;
}
/// Returns the pointer to referenced data
CUTLASS_HOST_DEVICE
Element * data() const { return ptr_; }
/// Returns a reference to the element at a given linear index
CUTLASS_HOST_DEVICE
Reference data(LongIndex idx) const {
return ReferenceFactory<typename platform::remove_const<Element>::type,
(sizeof_bits<Element>::value < 8)>::get(ptr_, idx);
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
Layout & layout() {
return layout_;
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
Layout layout() const {
return layout_;
}
/// Returns the layout object's stride vector
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the layout object's stride vector
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Returns the layout object's stride in a given physical dimension
CUTLASS_HOST_DEVICE
typename Layout::Stride::Index stride(int dim) const {
return layout_.stride().at(dim);
}
/// Returns the layout object's stride in a given physical dimension
CUTLASS_HOST_DEVICE
typename Layout::Stride::Index & stride(int dim) {
return layout_.stride().at(dim);
}
/// Computes the offset of an index from the origin of the tensor
CUTLASS_HOST_DEVICE
LongIndex offset(TensorCoord const& coord) const {
return layout_(coord);
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(TensorCoord const& coord) const {
return data(offset(coord));
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference operator[](TensorCoord const& coord) const {
return data(offset(coord));
}
/// Adds an offset to each pointer
CUTLASS_HOST_DEVICE
TensorRef & add_pointer_offset(LongIndex offset_) {
ptr_ += offset_;
return *this;
}
/// Adds an offset to each pointer
CUTLASS_HOST_DEVICE
TensorRef & add_coord_offset(TensorCoord const &coord) {
add_pointer_offset(offset(coord));
return *this;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef operator+(TensorCoord const& b) const {
TensorRef result(*this);
result.add_coord_offset(b);
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef & operator+=(TensorCoord const& b) {
add_coord_offset(b);
return *this;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef operator-(TensorCoord const& b) const {
TensorRef result(*this);
result.add_pointer_offset(-offset(b));
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef & operator-=(TensorCoord const& b) {
add_pointer_offset(-offset(b));
return *this;
}
};
/// Constructs a TensorRef, deducing types from arguments.
template <
typename Element,
typename Layout
>
CUTLASS_HOST_DEVICE
TensorRef<Element, Layout> make_TensorRef(Element *ptr, Layout const &layout) {
return TensorRef<Element, Layout>(ptr, layout);
}
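// Illustrative usage sketch (added for exposition; not part of the original header):
// combines make_TensorRef with the IdentityTensorLayout defined above to read an
// element of a row-major-like rank=2 tensor. The coordinate is an arbitrary example.
CUTLASS_HOST_DEVICE
float example_tensor_ref_read(float *ptr, int ldm) {
  // Strides {ldm, 1} make the identity layout behave like a row-major matrix.
  TensorRef<float, IdentityTensorLayout<2>> ref =
      make_TensorRef(ptr, IdentityTensorLayout<2>(make_Coord(ldm, 1)));
  // offset({2, 3}) = 2 * ldm + 3 * 1
  return ref.at(make_Coord(2, 3));
}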
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations to handle degenerate and sub-byte cases.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Element,
typename Layout
>
CUTLASS_HOST_DEVICE
bool TensorRef_aligned(TensorRef<Element, Layout> const &ref, int alignment) {
int const kStrideRank = Layout::kStrideRank;
if (reinterpret_cast<uintptr_t>(ref.data()) % alignment) {
return false;
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStrideRank; ++i) {
if (ref.stride(i) % alignment) {
return false;
}
}
return true;
}
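// Illustrative usage sketch (added for exposition; not part of the original header):
// a kernel dispatcher might reject a tensor whose pointer or strides are not multiples
// of a required alignment before selecting a vectorized code path. The alignment value
// below is an assumed example.
template <typename Element, typename Layout>
CUTLASS_HOST_DEVICE
bool example_is_vector_access_safe(TensorRef<Element, Layout> const &ref) {
  int const kAlignment = 8;   // hypothetical alignment requirement
  return TensorRef_aligned(ref, kAlignment);
}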
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/tensor_ref.h/0 | {
"file_path": "include/cutlass/tensor_ref.h",
"repo_id": "include",
"token_count": 3815
} | 45 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Templates that calculate the addresses and predicates used to load tiles
  from pitch-linear rank=2 tensors.
  This iterator uses masks to guard against out-of-bounds accesses. The first tile the
  iterator visits may be partial; the remaining tiles are complete. Consequently, the
  predicates only need to be computed twice: once before the first (residue) tile and
  once for the remaining full tiles, which all share the same predicates.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/permute.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIteratorPredicates
///
template <typename Shape_, typename Element_, typename Layout_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIteratorPredicates {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = typename Layout::TensorCoord;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector;
/// Number of 32b words containing predicates
static int const kPredicateByteCount =
(kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte;
static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
// private:
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Offset to the first steady-state tile
TensorCoord residue_offset_;
/// Iteration along vectors implied by the thread map
int iteration_vector_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
}
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
if (is_steady_state) {
if (kAdvanceRank == 0) {
guard = (coord.strided() < extent.strided());
} else {
guard = (coord.contiguous() < extent.contiguous());
}
} else {
guard = (coord.strided() < extent.strided() &&
coord.contiguous() < extent.contiguous());
}
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
CUTLASS_HOST_DEVICE
void set_predicates(int thread_id, TensorCoord const &threadblock_offset) {
TensorCoord residue_extent;
if (kAdvanceRank) {
typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.strided()) % Shape::kStrided;
if (!residue_size) {
residue_size = Shape::kStrided;
}
residue_offset_ = make_Coord(0, residue_size);
residue_extent = make_Coord(
extent_.contiguous(),
min(threadblock_offset.strided() + residue_size, extent_.strided())
);
} else {
typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.contiguous()) % Shape::kContiguous;
if (!residue_size) {
residue_size = Shape::kContiguous;
}
residue_offset_ = make_Coord(residue_size, 0);
residue_extent = make_Coord(
min(extent_.contiguous(), threadblock_offset.contiguous() + residue_size),
extent_.strided()
);
}
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id);
compute_predicates_(residue_extent, false);
set_iteration_index(0);
}
/// Default constructor
PredicatedTileAccessIteratorPredicates() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorPredicates(
/// Extent of tensor
TensorCoord extent)
: extent_(extent) {
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorPredicates &operator++() {
return *this;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() const {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
};
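// Worked example (added for exposition; not part of the original header): with
// kPredicatesPerByte = 4 and 4 bytes per 32b word, each predicate word guards 16
// accesses. A thread map producing Iterations::kCount * kAccessesPerVector = 24
// accesses therefore needs ceil(24 / 4) = 6 predicate bytes, rounded up to
// kPredicateWordCount = ceil(6 / 4) = 2 words; predicate i is stored in word i / 16
// at bit ((i % 16) / 4) * 8 + (i % 4), matching the indexing used by
// compute_predicates_() and valid() above.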
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIterator
///
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap, typename AccessType, bool Gather = false,
typename PermuteLayout = layout::NoPermute>
class PredicatedTileAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, bool Gather,
typename PermuteLayout>
class PredicatedTileAccessIterator<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, AccessType_, Gather,
PermuteLayout> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<
Shape, Element, Layout, AdvanceRank, ThreadMap, AccessType>;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static bool constexpr Permute = !platform::is_same<PermuteLayout, layout::NoPermute>::value
&& !platform::is_same<PermuteLayout, layout::InversePermute<layout::NoPermute>>::value;
using Mask = typename UnderlyingPredicates::Mask;
/// Uses a non-template class
struct Params : PredicatedTileAccessIteratorParams {
using Base = PredicatedTileAccessIteratorParams;
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) :
Base(layout.stride(0),
MakePredicatedTileAccessIteratorDesc<Shape, Element, Layout, kAdvanceRank, ThreadMap>()()
) { }
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
UnderlyingPredicates the_predicates;
/// Parameters object with precomputed internal state
Params params_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Used for out-of-order visitation
bool is_residue_tile_;
  /// The members below are used when Gather is enabled. The strided and contiguous
  /// offsets are recorded separately so that the final offset can be computed as
  ///
  ///   offset = contiguous_offset + indices[strided_offset]
/// Gather indices
int const *indices_;
/// Function to perform layout permutation and offset computation
PermuteLayout permute_layout_;
/// Tracks thread's coordinate offset in the matrix for current tile.
/// This is only used in the following cases:
/// - when Gather is true, strided coordinate needed to access indices (contiguous offset is tracked via pointer_)
  /// - when Permute is true, both coordinates are needed as input into the permutation function (pointer_ is fixed)
TensorCoord coord_offset_;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
the_predicates.compute_predicates_(extent, is_steady_state);
}
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
/// Gather indices
int const *indices = nullptr)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
the_predicates(extent),
is_residue_tile_(true),
indices_(indices),
permute_layout_(TensorCoord(extent.contiguous(), extent.strided()), params.stride_) {
the_predicates.set_predicates(thread_id, threadblock_offset);
if (Gather) {
assert(indices_);
}
// update internal pointers
Layout layout(params_.stride_);
if (!Gather && !Permute) {
add_pointer_offset(layout(the_predicates.thread_offset_));
} else {
coord_offset_ = the_predicates.thread_offset_;
if (!Permute) {
add_pointer_offset(layout(make_Coord(coord_offset_.contiguous(), 0)));
}
}
}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
the_predicates.set_iteration_index(index);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
if (is_residue_tile_) {
the_predicates.thread_offset_ += the_predicates.residue_offset_;
the_predicates.compute_predicates_(the_predicates.extent_, true);
Layout layout(params_.stride_);
if (!Gather && !Permute) {
add_pointer_offset(layout(the_predicates.residue_offset_));
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided() - 1);
pointer_ += Shape::kContiguous * tile_offset.contiguous() * sizeof_bits<Element>::value / 8;
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous() - 1);
pointer_ += Shape::kStrided * tile_offset.strided() * sizeof_bits<Element>::value / 8;
}
} else {
coord_offset_.strided() = the_predicates.thread_offset_.strided() + Shape::kStrided * (tile_offset.strided() - kAdvanceRank);
if (!Permute) {
add_pointer_offset(layout(make_Coord(the_predicates.residue_offset_.contiguous(), 0)));
add_pointer_offset(Shape::kContiguous * (tile_offset.contiguous() - (1 - kAdvanceRank)));
} else {
coord_offset_.contiguous() = the_predicates.thread_offset_.contiguous() + Shape::kContiguous * (tile_offset.contiguous() - (1 - kAdvanceRank));
}
}
} else {
if (!Gather && !Permute) {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
}
} else {
coord_offset_.strided() += Shape::kStrided * tile_offset.strided();
if (!Permute) {
add_pointer_offset(Shape::kContiguous * tile_offset.contiguous());
} else {
coord_offset_.contiguous() += Shape::kContiguous * tile_offset.contiguous();
}
}
}
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
if (Gather || Permute)
{
if (!valid()) {
return nullptr;
}
Index coord_contig = (Permute ? coord_offset_.contiguous() : 0) + the_predicates.iteration_contiguous_ * ThreadMap::Delta::kContiguous + the_predicates.iteration_vector_ * AccessType::kElements;
Index coord_strided = coord_offset_.strided() + the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided;
if (Gather) {
coord_strided = indices_[coord_strided];
}
LongIndex offset = Permute ? permute_layout_(TensorCoord(coord_contig, coord_strided)) : (coord_strided * LongIndex(params_.stride_) + coord_contig);
return reinterpret_cast<AccessType *>(pointer_ + OffsetBytes<Element>(offset));
}
return reinterpret_cast<AccessType *>(
pointer_ +
the_predicates.iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + the_predicates.iteration_vector_;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
the_predicates.operator++();
++the_predicates.iteration_vector_;
if (the_predicates.iteration_vector_ < kAccessesPerVector) {
return *this;
}
the_predicates.iteration_vector_ = 0;
++the_predicates.iteration_contiguous_;
if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
    // Enter here only if (iteration_contiguous_ == ThreadMap::Iterations::kContiguous)
the_predicates.iteration_contiguous_ = 0;
++the_predicates.iteration_strided_;
if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
if (!Gather && !Permute) {
pointer_ += params_.inc_strided_;
}
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
the_predicates.iteration_strided_ = 0;
if (!Gather && !Permute) {
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
}
return *this;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
the_predicates.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
the_predicates.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
the_predicates.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
the_predicates.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() const {
return the_predicates.valid();
}
};
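// Illustrative usage sketch (added for exposition; not part of the original header,
// and assuming a thread map such as transform::PitchLinearStripminedThreadMap from
// cutlass/transform/pitch_linear_thread_map.h):
//
//   using Shape     = layout::PitchLinearShape<128, 8>;
//   using Element   = half_t;
//   using ThreadMap = PitchLinearStripminedThreadMap<Shape, 32>;
//   using Access    = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//   using Iterator  = PredicatedTileAccessIterator<
//       Shape, Element, layout::PitchLinear, /*AdvanceRank=*/1, ThreadMap, Access>;
//
//   typename Iterator::Params params(layout::PitchLinear(ldm));
//   Iterator iter(params, ptr, extent, thread_id, threadblock_offset);
//
//   // get() returns a pointer for the current access, valid() reports whether the
//   // access is in bounds, and operator++ advances through the accesses of a tile
//   // before stepping to the next tile along the advance rank.
//   if (iter.valid()) {
//     Access fragment = *iter.get();
//   }
//   ++iter;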
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, bool Gather,
typename PermuteLayout>
class PredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, AccessType_, Gather,
PermuteLayout> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType,
Gather, PermuteLayout>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){};
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset,
      int const *indices = nullptr ///< gather/scatter indices, forwarded to the underlying pitch-linear iterator
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column()),
indices) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, bool Gather,
typename PermuteLayout>
class PredicatedTileAccessIterator<Shape_, Element_, layout::RowMajor,
AdvanceRank, ThreadMap_, AccessType_, Gather,
PermuteLayout> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessType,
Gather, PermuteLayout>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
      : params_(layout::PitchLinear(layout.stride(0))) {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset,
/// Gather indices
int const *indices = nullptr)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row()),
indices) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for affine rank 2 data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRankN<2>,
AdvanceRank, ThreadMap_, AccessType_, false,
layout::NoPermute> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRankN<2>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<
Shape, Element, layout::PitchLinear, AdvanceRank, ThreadMap, AccessType>;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingPredicates::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileAccessIterator;
private:
/// stride of pitch-linear layout (units of Element)
Coord<Layout::kStrideRank, Layout::LongIndex> stride_;
/// amount (in byte) to increment pointer to move to next access along
/// contiguous dimension
LongIndex inc_contiguous_;
/// amount (in byte) to increment pointer from first access of current
/// contiguous dimension to first access of next one.
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access of current
/// contiguous dimension to first access of next one.
LongIndex inc_next_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
   Params(): stride_(0), inc_contiguous_(0), inc_strided_(0), inc_next_strided_(0), inc_next_(0), inc_advance_(0) { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : stride_({layout.stride(0), layout.stride(1)}) {
inc_contiguous_ = (LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) *
sizeof_bits<Element>::value / 8;
inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
inc_next_strided_ = inc_strided_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ =
Shape::kStrided * LongIndex(stride_[1]) * sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ = Shape::kContiguous * stride_[0] * sizeof_bits<Element>::value / 8;
}
inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ - LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_;
    }
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
//
// Data members
//
/// Parameters object with precomputed internal state
Params params_;
/// Internal pointer to first access of tile
BytePointer pointer_;
UnderlyingPredicates the_predicates;
/// Used for out-of-order visitation
bool is_residue_tile_;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
the_predicates.compute_predicates_(extent, is_steady_state);
}
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
the_predicates(extent),
is_residue_tile_(true) {
the_predicates.set_predicates(thread_id, threadblock_offset);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(the_predicates.thread_offset_));
}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { the_predicates.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
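    // On the first call, step past the "residue" tile: fold the residue
    // offset into the per-thread offset, recompute predicates for the
    // steady-state extent, and account for that step when applying the
    // requested tile offset (hence the -1 along the advance rank below).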
if (is_residue_tile_) {
the_predicates.thread_offset_ += the_predicates.residue_offset_;
Layout layout(params_.stride_);
add_pointer_offset(layout(the_predicates.residue_offset_));
the_predicates.compute_predicates_(the_predicates.extent_, true);
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1] - 1);
pointer_ += Shape::kContiguous * tile_offset[0];
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0] - 1);
pointer_ += Shape::kStrided * tile_offset[1];
}
} else {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]);
pointer_ += Shape::kContiguous * tile_offset[0];
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]);
pointer_ += Shape::kStrided * tile_offset[1];
}
}
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(pointer_) + the_predicates.iteration_vector_;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
the_predicates.operator++();
++the_predicates.iteration_vector_;
if (the_predicates.iteration_vector_ < kAccessesPerVector) {
return *this;
}
the_predicates.iteration_vector_ = 0;
++the_predicates.iteration_contiguous_;
if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
pointer_ += params_.inc_contiguous_;
return *this;
}
    // Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
the_predicates.iteration_contiguous_ = 0;
++the_predicates.iteration_strided_;
if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_next_strided_;
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
the_predicates.iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { the_predicates.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { the_predicates.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { the_predicates.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return the_predicates.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for affine rank 2 column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRank2ColumnMajor,
AdvanceRank, ThreadMap_, AccessType_, false,
layout::NoPermute> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::AffineRankN<2>, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
      : params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))) {}
};
private:
//
// Data members
//
/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset(make_Coord(tile_offset.row(), tile_offset.column()));
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for affine rank-2 row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRank2RowMajor,
AdvanceRank, ThreadMap_, AccessType_, false,
layout::NoPermute> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::AffineRankN<2>, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
      : params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset(make_Coord(tile_offset.column(), tile_offset.row()));
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for column-major interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, int InterleavedK>
class PredicatedTileAccessIterator<Shape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessType_, false,
layout::NoPermute> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for row-major interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, int InterleavedK>
class PredicatedTileAccessIterator<Shape_, Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessType_, false,
layout::NoPermute> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile
first, with the objective of minimizing predicate mask updates during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "regular_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
int Alignment = sizeof_bits<Element>::value * ThreadMap::kElementsPerAccess / 8
>
class RegularTileIterator2dThreadTile;
/// Regular tile iterator specialized for pitch-linear + 2d thread-tiled threadmapping
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the contiguous or strided dimensions.");
private:
//
// Types
//
using AccessType = AlignedArray<Element, ThreadMap::ThreadAccessShape::kCount, kAlignment>;
//
// Data members
//
/// Pointer to memory
uint8_t *pointer_;
/// Stride quantity
StrideIndex stride_;
/// Amount to increment pointer along strided dimension
LongIndex increment_strided_;
/// Amount to advance pointer between tiles
LongIndex increment_advance_;
public:
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(): pointer_(nullptr), increment_strided_(0), increment_advance_(0) { }
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(
TensorRef const &ref,
int thread_idx,
int interleave
){
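    // 'interleave' is the interleaving factor of the underlying layout
    // (the interleaved specializations below pass 4); strides and strided
    // increments are therefore expressed per group of 'interleave' elements.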
TensorCoord t = ThreadMap::initial_offset(thread_idx);
long int offset = t[0] * interleave + t[1] * ref.stride()[0]/interleave;
pointer_ = reinterpret_cast<uint8_t *>(ref.data() + offset);
stride_ = ref.stride()[0] / interleave;
increment_strided_ = (ref.stride()[0] * sizeof_bits<Element>::value / 8) * ThreadMap::Delta::kStrided / interleave;
increment_advance_ =
(kAdvanceRank == 0 ?
Shape::kContiguous * sizeof_bits<Element>::value / 8 :
Shape::kStrided * (ref.stride()[0] * sizeof_bits<Element>::value / 8) / interleave);
}
/// Loads a fragment
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
uint8_t const *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int idx = c + s * ThreadMap::Iterations::kContiguous;
frag_ptr[idx] = access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided];
}
if (s + 1 < ThreadMap::Iterations::kStrided) {
byte_pointer += increment_strided_;
}
}
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
load_with_pointer_offset(
frag,
tile_offset.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_
);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&frag);
uint8_t *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int idx = c + s * ThreadMap::Iterations::kContiguous;
access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided] = frag_ptr[idx];
}
if (s + 1 < ThreadMap::Iterations::kStrided) {
byte_pointer += increment_strided_;
}
}
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
store_with_pointer_offset(
frag,
tile_offset.contiguous() * Shape::kContiguous + tile_offset.strided() * Shape::kStrided * stride_
);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator++() {
pointer_ += increment_advance_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator--() {
pointer_ -= increment_advance_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
int offset = sizeof_bits<Element>::value *
(coord.contiguous() * Shape::kContiguous + coord.strided() * Shape::kStrided * stride_) / 8;
add_pointer_offset(offset);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator2dThreadTile<Shape_, Element_, layout::RowMajorInterleaved<4>, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorInterleaved<4>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
using Underlying = RegularTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
kAlignment
>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the row or column dimensions.");
private:
Underlying iterator_;
public:
CUTLASS_DEVICE
RegularTileIterator2dThreadTile() { }
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(
TensorRef const &ref,
int thread_idx
):
iterator_({ref.data(), ref.stride()}, thread_idx, 4) {
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
iterator_.load_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()});
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
iterator_.load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
iterator_.store_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()});
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
iterator_.store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator--() {
--iterator_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator2dThreadTile<Shape_, Element_, layout::ColumnMajorInterleaved<4>, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorInterleaved<4>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
using PitchLinearThreadMap = PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
ThreadMap::kThreads, ThreadMap::ThreadAccessShape::kCount >;
using Underlying = RegularTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap
>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the row or column dimensions.");
private:
Underlying iterator_;
public:
CUTLASS_DEVICE
RegularTileIterator2dThreadTile() { }
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(
TensorRef const &ref,
int thread_idx
):
iterator_({ref.data(), ref.stride()}, thread_idx, 4) {
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
iterator_.load_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()});
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
iterator_.load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
iterator_.store_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()});
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
iterator_.store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator--() {
--iterator_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
# CuTe Tensor algorithms
This section summarizes the interfaces and implementations
of common numerical algorithms performed on `Tensor`s.
The implementation of these algorithms may be found in the
[include/cute/algorithm/](../../../include/cute/algorithm/)
directory.
## `copy`
CuTe's `copy` algorithm copies the elements of a source `Tensor`
into the elements of a destination `Tensor`.
The various overloads of `copy` can be found in
[`include/cute/algorithm/copy.hpp`](../../../include/cute/algorithm/copy.hpp).
### Interface and specialization opportunities
A `Tensor` encapsulates the data type, data location,
and possibly also the shape and stride of the tensor at compile time.
As a result, `copy` can and does dispatch,
based on the types of its arguments,
to use any of various synchronous or asynchronous hardware copy instructions.
The `copy` algorithm has two main overloads.
The first just takes the source `Tensor` and the destination `Tensor`.
```c++
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst);
```
The second takes those two parameters, plus a `Copy_Atom`.
```c++
template <class... CopyArgs,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Atom<CopyArgs...> const& copy_atom,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst);
```
The two-parameter `copy` overload picks a default implementation
based only on the types of the two `Tensor` parameters.
The `Copy_Atom` overload lets callers override that default
by specifying a nondefault `copy` implementation.
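A minimal sketch of calling both overloads follows; the 8x8 shape, the element
type parameter, and the choice of a trivial `UniversalCopy` atom are
assumptions made for illustration, not requirements of the interface.
```c++
#include <cute/tensor.hpp>
using namespace cute;
// Sketch: copy an 8x8 global-memory tile with the default dispatch,
// then again while explicitly selecting a (trivial) Copy_Atom.
template <class T>
__device__ void copy_overloads(T const* src_ptr, T* dst_ptr)
{
  Tensor src = make_tensor(make_gmem_ptr(src_ptr), make_shape(Int<8>{}, Int<8>{}));
  Tensor dst = make_tensor(make_gmem_ptr(dst_ptr), make_shape(Int<8>{}, Int<8>{}));
  copy(src, dst);                                    // two-parameter overload: default implementation
  copy(Copy_Atom<UniversalCopy<T>, T>{}, src, dst);  // Copy_Atom overload: caller-chosen implementation
}
```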
### Parallelism and synchronization depend on parameter types
Either the default implementation or
the implementation selected by a `Copy_Atom` overload
may use none or all available parallelism,
and may have a variety of synchronization semantics.
The behavior depends on `copy`'s parameter types.
Users are expected to figure this out based on their knowledge
of the architecture on which they are running.
(Developers often write a custom optimized kernel
for each GPU architecture.)
The `copy` algorithm may be sequential per thread,
or it may be parallel across some collection of threads
(e.g., a block or cluster).
If `copy` is parallel,
then the collection of participating threads
may need synchronization before any thread in the collection
may assume that the copy operation has completed.
For example, if the participating threads form a thread block,
then users must invoke `__syncthreads()`
or the Cooperative Groups equivalent
before they may use the results of `copy`.
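For example, a common pattern (sketched below with hypothetical per-thread partition names) is for every thread in a block to copy its partition into shared memory and then synchronize before any thread reads the result.
```c++
// Sketch only: tAgA and tAsA are assumed to be this thread's partitions of
// global-memory and shared-memory tensors, respectively.
copy(tAgA, tAsA);  // each thread copies its own partition
__syncthreads();   // every thread must arrive before any thread reads tAsA
```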
The `copy` algorithm may use asynchronous copy instructions,
such as `cp.async`, or its C++ interface `memcpy_async`.
In that case, users will need to perform
the additional synchronization appropriate to that underlying implementation
before they may use the results of the `copy` algorithm.
[The CuTe GEMM tutorial example](../../../examples/cute/tutorial/)
shows one such synchronization method.
More optimized GEMM implementations use pipelining techniques
to overlap asynchronous `copy` operations with other useful work.
### A generic copy implementation
A simple example of a generic `copy` implementation
for any two `Tensor`s looks like this.
```c++
template <class TA, class ALayout,
class TB, class BLayout>
CUTE_HOST_DEVICE
void
copy(Tensor<TA, ALayout> const& src, // Any logical shape
Tensor<TB, BLayout> & dst) // Any logical shape
{
for (int i = 0; i < size(src); ++i) {
dst(i) = src(i);
}
}
```
This generic `copy` algorithm addresses both `Tensor`s
with 1-D logical coordinates, thus traversing both `Tensor`s
in a logical column-major order.
Some reasonable architecture-independent optimizations
would include the following.
1. If the two `Tensor`s have known memory spaces with optimized
access instructions (like `cp.async`), then dispatch to the
custom instruction.
2. If the two `Tensor`s have static layouts and it can be proven
that element vectorization is valid -- for example, four `ld.global.b32`s
can be combined into a single `ld.global.b128` -- then vectorize the source
and destination tensors.
3. If possible, validate that the copy instruction to be used is
appropriate for the source and destination tensors.
CuTe's optimized copy implementations can do all of these.
## `copy_if`
CuTe's `copy_if` algorithm lives in the same header as `copy`,
[`include/cute/algorithm/copy.hpp`](../../../include/cute/algorithm/copy.hpp).
The algorithm takes source and destination `Tensor` parameters like `copy`,
but it also takes a "predication `Tensor`"
with the same shape as the input and output.
Elements of the source `Tensor` are only copied
if the corresponding predication `Tensor` element is nonzero.
For details on why and how to use `copy_if`,
please refer to the
["predication" section of the tutorial](./0y_predication.md).
## `gemm`
### What `gemm` computes
The `gemm` algorithm takes three `Tensor`s, A, B, and C.
What it does depends on the number of modes
that its `Tensor` parameters have.
We express these modes using letters.
* V indicates a "vector," a mode of independent elements.
* M and N indicate the number of rows and columns, respectively,
of the matrix result C of the BLAS's GEMM routine.
* K indicates the "reduction mode" of GEMM,
that is, the mode along which GEMM sums.
Please see the [GEMM tutorial](./0x_gemm_tutorial.md) for details.
We list the modes of the input `Tensor`s A and B,
and the output `Tensor` C,
using a notation `(...) x (...) => (...)`.
The two leftmost `(...)` describe A and B (in that order),
and the `(...)` to the right of the `=>` describes C.
1. `(V) x (V) => (V)`. The element-wise product of vectors: C<sub>v</sub> += A<sub>v</sub> B<sub>v</sub>. Dispatches to FMA or MMA.
2. `(M) x (N) => (M,N)`. The outer product of vectors: C<sub>mn</sub> += A<sub>m</sub> B<sub>n</sub>. Dispatches to (4) with V=1.
3. `(M,K) x (N,K) => (M,N)`. The product of matrices: C<sub>mn</sub> += A<sub>mk</sub> B<sub>nk</sub>. Dispatches to (2) for each K.
4. `(V,M) x (V,N) => (V,M,N)`. The batched outer product of vectors: C<sub>vmn</sub> += A<sub>vm</sub> B<sub>vn</sub>. Optimizes for register reuse and dispatches to (1) for each M, N.
5. `(V,M,K) x (V,N,K) => (V,M,N)`. The batched product of matrices: C<sub>vmn</sub> += A<sub>vmk</sub> B<sub>vnk</sub>. Dispatches to (4) for each K.
Please refer to the [GEMM tutorial](./0x_gemm_tutorial.md)
for an overview of CuTe's convention for ordering the modes.
For example, if K appears, it always appears rightmost ("outermost").
If V appears, it always appears leftmost ("innermost").
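As a sketch, for case (5) above, a call on per-thread register fragments (hypothetical names) might look like the following; the dispatch to an FMA or MMA instruction is based entirely on the tensors' types.
```c++
// Sketch only: tCrA is (V,M,K), tCrB is (V,N,K), and tCrC is (V,M,N).
gemm(tCrA, tCrB, tCrC);  // tCrC += tCrA * tCrB
```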
### Dispatch to optimized implementations
Just like with `copy`, CuTe's implementations of `gemm`
use their `Tensor` arguments' types to dispatch
to an appropriately optimized implementation.
Also like `copy`, `gemm` takes an optional `MMA_Atom` parameter
that lets callers override the default `FMA` instruction
that CuTe would select based on the `Tensor` arguments' types.
For more information on `MMA_Atom` and on specialization of `gemm`
for different architectures, please refer to the
[MMA section of the tutorial](./0t_mma_atom.md).
## `axpby`
The `axpby` algorithm lives in the header file
[`include/cute/algorithm/axpby.hpp`](../../../include/cute/algorithm/axpby.hpp).
It assigns to $y$ the result of $\alpha x + \beta y$,
where $\alpha$ and $\beta$ are scalars and $x$ and $y$ are `Tensor`s.
The name stands for "Alpha times X Plus Beta times Y,"
and is a generalization of the original BLAS "AXPY" routine
("Alpha times X Plus Y").
## `fill`
The `fill` algorithm lives in the header file
[`include/cute/algorithm/fill.hpp`](../../../include/cute/algorithm/fill.hpp).
It overwrites the elements of its `Tensor` output argument
with a given scalar value.
## `clear`
The `clear` algorithm lives in the header file
[`include/cute/algorithm/clear.hpp`](../../../include/cute/algorithm/clear.hpp).
It overwrites the elements of its `Tensor` output argument with zeros.
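As a sketch, with a hypothetical accumulator fragment, `fill` and `clear` are used as follows.
```c++
// Sketch only: tCrC is any writeable tensor, e.g., a register-backed accumulator.
fill(tCrC, 1.0f);  // set every element to 1.0f
clear(tCrC);       // set every element to zero
```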
## Other algorithms
CuTe provides other algorithms.
Their header files can be found in the
[`include/cute/algorithm`](../../../include/cute/algorithm)
directory.
| media/docs/cute/04_algorithms.md/0 | {
"file_path": "media/docs/cute/04_algorithms.md",
"repo_id": "media",
"token_count": 2578
} | 48 |
# Synchronization primitives
## Overview of CUDA's synchronization methods
The CUDA programming model provides 3 abstractions:
* hierarchical parallelism -- that is, parallel threads
grouped into hierarchical units such as blocks and clusters;
* shared memory, through which parallel threads that are
in the same hierarchical unit can communicate; and
* synchronization methods for threads.
These abstractions help developers extract
both fine-grained and coarse-grained parallelism,
by making it possible for them to subdivide problems
into independent components,
and to insert synchronization at appropriate points.
Over the years CUDA has introduced several synchronization primitives
that operate at different levels of the hierarchy.
These include
* [thread block - level](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions) synchronization (e.g., `__syncthreads()`);
* [warp-level](https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/) synchronization (e.g., `__syncwarp()`); and
* [thread-level](https://docs.nvidia.com/cuda/cuda-c-programming-guide/#memory-fence-functions) fence operations.
As an extension to this, starting with the Hopper architecture, CUDA added the following improvements:
* [thread block clusters](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#thread-block-clusters) --
a new level in the thread hierarchy representing
a group of thread blocks that can coordinate and share data;
* synchronization instructions for a thread block cluster and threads within a cluster scope.
## CUTLASS's abstractions for Hopper features
CUTLASS now includes abstractions
for the following features introduced in Hopper.
1. Thread block cluster - level synchronization and query
[APIs](/include/cute/arch/cluster_sm90.hpp)
2. Abstractions for new
[barrier instructions](/include/cutlass/arch/barrier.h)
which help with efficient synchronization
of threads within a thread block cluster.
### Asynchronous pipelines
In order to write a performant GEMM Kernel,
software pipelining is critical to hide the latency of global memory loads.
(Please refer to the
[Efficient GEMM](/media/docs/efficient_gemm.md#pipelining) document.)
Different threads or groups of threads
may have different roles in the pipeline.
Some are "producers" that load data or perform computations
to satisfy other threads' input data dependencies.
The same or different threads may be "consumers"
that do other work with those input data dependencies,
once they are satisfied.
Starting with the Hopper architecture,
the presence of hardware-accelerated synchronization instructions
makes it possible for "producer" and "consumer" threads
to communicate with each other efficiently
about their data dependencies.
Implementing a persistent GEMM algorithm calls for managing
dozens of different kinds of asynchronously executing operations
that synchronize using multiple barriers organized as a circular list.
This complexity is too much for human programmers to manage by hand.
As a result, we have developed
[asynchronous Pipeline classes](/include/cutlass/pipeline/).
These classes help developers orchestrate a pipeline
of asynchronous producer and consumer threads,
without needing to worry about lower-level hardware details.
These classes serve a similar function as the various
[pipeline abstractions](https://nvidia.github.io/libcudacxx/extended_api/synchronization_primitives/pipeline.html)
in libcu++.
#### Pipeline methods
##### Producer acquire
The `producer_acquire` method is to be used by asynchronous producer threads
before issuing other instructions associated with a particular pipeline stage
(e.g., copy or write).
This is a blocking instruction
which blocks further execution of the producer threads
until the particular stage being acquired
is released by a consumer.
We say that a pipeline at its start is "empty" if producer threads are free to produce and do not need to wait for a consumer release -- that is, if an acquire operation is expected to succeed. If the pipeline at its start is empty, then we can either skip performing producer acquire operations during the first pass through the pipeline stages, or use the `make_producer_start_state` method. The latter ensures that the acquire operation will succeed at the start of a pipeline.
##### Producer commit
The `producer_commit` method is to be issued by asynchronous producer threads
after the instructions associated with a particular stage
(e.g., shared memory writes) have completed,
in order to notify the waiting asynchronous consumer threads.
This is a nonblocking instruction.
This API may be a no-op in some cases,
if the producer instructions themselves update the associated barrier stage automatically
(e.g., TMA-based producer threads using the `PipelineTmaAsync` class).
##### Consumer wait
The `consumer_wait` method is to be used by consumer threads
before consuming data from a particular pipeline stage
which is expected to be produced by producer threads.
This is a blocking instruction. That is,
until the producer threads have committed to a particular stage,
this instruction is expected to block further execution of consumer threads.
##### Consumer release
The `consumer_release` method is to be used by consumer threads
to signal waiting producer threads that they have finished consuming data
associated with a particular stage of the pipeline.
This is a nonblocking instruction.
#### Pipeline example
```c++
// 4-stage Pipeline
static constexpr int NumStages = 4;
using MainloopPipeline = typename cutlass::PipelineAsync<NumStages>;
using PipelineState = typename cutlass::PipelineState<NumStages>;
// 2 producer threads and 1 consumer thread
typename MainloopPipeline::Params params;
params.producer_arv_count = 2;
params.consumer_arv_count = 1;
MainloopPipeline pipeline(shared_storage.storage, params);
// Producer threads
if (thread_idx == 0 or thread_idx == 1) {
PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>();
for ( ; iter > 0; --iter) {
pipeline.producer_acquire(smem_pipe_write);
// Producer ops
// If any memory operations are involved, then we also need
// to guarantee that writes are completed and visible to consumer(s).
pipeline.producer_commit(smem_pipe_write);
++smem_pipe_write;
}
}
else if (thread_idx == 2) {
PipelineState smem_pipe_read;
for (; iter > 0; --iter) {
pipeline.consumer_wait(smem_pipe_read);
// Consumer ops
pipeline.consumer_release(smem_pipe_read);
++smem_pipe_read;
}
}
```
In this example, we create an instance of the asynchronous pipeline class `PipelineAsync`,
and then synchronize among 3 asynchronously executing threads:
2 producer threads and 1 consumer thread.
Please note that this is a basic example.
There are different versions possible,
depending on what the producer and consumer threads are doing.
Please refer to our [unit tests](/test/unit/pipeline)
and the other [pipeline classes](/include/cutlass/pipeline/pipeline.hpp)
for more details.
# Copyright
Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| media/docs/pipeline.md/0 | {
"file_path": "media/docs/pipeline.md",
"repo_id": "media",
"token_count": 2277
} | 49 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
from cutlass_library import SubstituteTemplate
import numpy as np
from scipy.special import erf
from cutlass_library import DataType, DataTypeTag
from cutlass.backend.c_types import MatrixCoord_
from cutlass.backend.frontend import NumpyFrontend
from cutlass.backend.library import ActivationOp, ActivationOpTag
from cutlass.utils.datatypes import is_numpy_tensor, is_torch_available, is_torch_tensor
dtype2ctype = {
DataType.f16: ctypes.c_uint16,
DataType.f32: ctypes.c_float,
DataType.f64: ctypes.c_double,
DataType.s8: ctypes.c_int8,
DataType.s32: ctypes.c_int32
}
if is_torch_available():
import torch
import torch.nn.functional as F
def get_scalar(value):
"""
Returns a scalar value from a container (e.g., np.ndarray)
"""
if is_numpy_tensor(value):
if value.size != 1:
raise Exception("Scalars used in epilogue must be of size 1")
return value.reshape(-1)[0]
    elif is_torch_tensor(value):
        if value.numel() != 1:
            raise Exception("Scalars used in epilogue must be of size 1")
        return value.reshape(-1)[0]
else:
return value
def to_ctype_value(value, dtype):
"""
Converts ``value`` to the corresponding storage needed for the ctype that
will store ``value``.
"""
scalar = get_scalar(value)
if dtype == DataType.f16:
# Convert f16 value into an integer
return int.from_bytes(np.float16(scalar).tobytes(), "little")
else:
return scalar
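# Example (sketch): f16 scalars are stored as their raw 16-bit pattern, so
# to_ctype_value(1.0, DataType.f16) returns 0x3C00 (15360), while
# to_ctype_value(1.0, DataType.f32) simply returns the Python float 1.0.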
#################################################################################################
#
# Epilogue Functors
#
#################################################################################################
class EpilogueFunctorBase:
"""
Base class for thread-level epilogue functors
"""
def __init__(self) -> None:
pass
def emit(self, tag, template_argument):
template = """${tag}<${arguments}>"""
arguments = ""
for idx, arg in enumerate(template_argument):
arguments += arg
if idx < len(template_argument) - 1:
arguments += ", "
values = {
"tag": tag,
"arguments": arguments,
}
return SubstituteTemplate(template, values)
class LinearCombination(EpilogueFunctorBase):
"""
Apply a linear combination operator to an array of elements
D = alpha * accumulator + beta * source
:param element_output: data type used to load and store tensors
:param epilogue_vector_length: number of elements computed per operation.
Usually it is 128/sizeof_bits_v<ElementOutput_>, but we use 64 and 32 sometimes
when there are not enough data to store
:param element_accumulator: Accumulator data type
:param element_epilogue: data type used to compute linear combination
"""
tag = "cutlass::epilogue::thread::LinearCombination"
def __init__(
self, element_output, epilogue_vector_length,
element_accumulator=None, element_epilogue=None) -> None:
super().__init__()
if element_accumulator is None:
element_accumulator = element_output
if element_epilogue is None:
element_epilogue = element_output
self.element_output = element_output
self.element_accumulator = element_accumulator
self.element_epilogue = element_epilogue
self.epilogue_vector_length = epilogue_vector_length
self.template_arguments = [
DataTypeTag[element_output],
str(epilogue_vector_length),
DataTypeTag[element_accumulator],
DataTypeTag[element_epilogue],
]
c_element_epilogue = dtype2ctype[self.element_epilogue]
element_epilogue = self.element_epilogue
class _EpilogueOutputOpParamsEVT(ctypes.Structure):
"""
Epilogue params when using the default linear combination of EVT, which
does not currently use {alpha,beta}_ptr_array
"""
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p),
]
def __init__(self, alpha, beta, *args) -> None:
self.alpha = to_ctype_value(alpha, element_epilogue)
self.beta = to_ctype_value(beta, element_epilogue)
class _EpilogueOutputOpParams(ctypes.Structure):
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p),
("alpha_ptr_array", ctypes.c_void_p),
("beta_ptr_array", ctypes.c_void_p),
]
def __init__(self, alpha, beta, *args) -> None:
self.alpha = to_ctype_value(alpha, element_epilogue)
self.beta = to_ctype_value(beta, element_epilogue)
def to_evt_params(self) -> _EpilogueOutputOpParamsEVT:
return _EpilogueOutputOpParamsEVT(self.alpha, self.beta)
self.epilogue_type = _EpilogueOutputOpParams
self.epilogue_type_evt = _EpilogueOutputOpParamsEVT
def emit(self):
return super().emit(self.tag, self.template_arguments)
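# Usage sketch (the arguments below are illustrative only): construct the functor,
# emit the C++ template instantiation string, and pack runtime scalars into its ctypes params.
#   functor = LinearCombination(DataType.f32, 4)
#   decl = functor.emit()  # "cutlass::epilogue::thread::LinearCombination<float, 4, float, float>"
#   params = functor.epilogue_type(1.0, 0.0)  # alpha = 1.0, beta = 0.0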
class LinearCombinationClamp(LinearCombination):
"""
Applies a linear combination operator to an array of elements then clamps
the output before converting to the output element type.
D = alpha * accumulator + beta * source + uniform
:param element_output: data type used to load and store tensors
:param epilogue_vector_length: number of elements computed per operation.
Usually it is 128/sizeof_bits_v<ElementOutput_>, but we use 64 and 32 sometimes
when there are not enough data to store
:param element_accumulator: Accumulator data type
:param element_epilogue: data type used to compute linear combination
"""
tag = "cutlass::epilogue::thread::LinearCombinationClamp"
def __init__(
self, element_output, epilogue_vector_length,
element_accumulator=None, element_epilogue=None) -> None:
# Base constructor
super().__init__(
element_output,
epilogue_vector_length,
element_accumulator,
element_epilogue,
)
c_element_epilogue = dtype2ctype[self.element_epilogue]
element_epilogue = self.element_epilogue
class _EpilogueOutputOpParams(ctypes.Structure):
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p),
]
def __init__(self, alpha, beta, *args) -> None:
self.alpha = to_ctype_value(alpha, element_epilogue)
self.beta = to_ctype_value(beta, element_epilogue)
self.epilogue_type = _EpilogueOutputOpParams
class FastLinearCombinationClamp(EpilogueFunctorBase):
"""
Applies a linear combination operator to an array of elements then clamps
the output before converting to the output element type.
D = alpha * accumulator + beta * source
    Note: The method below is only applicable when problem_size_K <= 256 for signed int8 gemm
    or problem_size_K <= 128 for unsigned int8 gemm. Otherwise, use the default approach
    above.
:param element_output: data type used to load and store tensors
:param epilogue_vector_length: number of elements computed per operation.
Usually it is 128/sizeof_bits_v<ElementOutput_>, but we use 64 and 32 sometimes
when there are not enough data to store
"""
tag = "cutlass::epilogue::thread::FastLinearCombinationClamp"
def __init__(self, element_output, epilogue_vector_length, *args) -> None:
super().__init__()
self.template_arguments = [
DataTypeTag[element_output], str(epilogue_vector_length)
]
self.element_accumulator = DataType.s32
self.element_epilogue = DataType.f32
# get epilogue output op
c_element_epilogue = dtype2ctype[self.element_epilogue]
element_epilogue = self.element_epilogue
class _EpilogueOutputOpParams(ctypes.Structure):
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p),
]
def __init__(self, alpha, beta, *args) -> None:
self.alpha = to_ctype_value(alpha, element_epilogue)
self.beta = to_ctype_value(beta, element_epilogue)
self.epilogue_type = _EpilogueOutputOpParams
def emit(self):
return super().emit(self.tag, self.template_arguments)
class LinearCombinationGeneric(LinearCombination):
"""
Applies a linear combination operator followed by an activation function
to an array of elements.
D = activation(alpha * accumulator + beta * source)
:param activation_functor: input activation functor
:param element_output: data type used to load and store tensors
:param epilogue_vector_length: number of elements computed per operation.
Usually it is 128/sizeof_bits_v<ElementOutput_>, but we use 64 and 32 sometimes
when there are not enough data to store
:param element_accumulator: Accumulator data type
:param element_epilogue: data type used to compute linear combination
"""
tag = "cutlass::epilogue::thread::LinearCombinationGeneric"
def __init__(
self, activation_functor,
element_output, epilogue_vector_length,
element_accumulator=None, element_epilogue=None) -> None:
super().__init__(
element_output,
epilogue_vector_length,
element_accumulator,
element_epilogue,
)
self.template_arguments = [
activation_functor.emit()] + self.template_arguments
self.activation_functor = activation_functor
self.element_epilogue = element_epilogue
# get epilogue output op
self.epilogue_type = self.activation_functor.epilogue_output_op(self.element_epilogue)
class ActivationFunctor:
"""
Base class for frequently used activation functions
"""
@staticmethod
def numpy(x: np.ndarray):
raise NotImplementedError()
@classmethod
def emit(cls):
return ActivationOpTag[cls.binding_type]
@staticmethod
def epilogue_output_op(element_epilogue):
c_element_epilogue = dtype2ctype[element_epilogue]
class _EpilogueOutputOpParams(ctypes.Structure):
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p),
]
def __init__(self, alpha, beta, *args) -> None:
self.alpha = to_ctype_value(alpha, element_epilogue)
self.beta = to_ctype_value(beta, element_epilogue)
return _EpilogueOutputOpParams
class ActivationMeta(type):
@classmethod
def __call__(cls, x, *args):
if is_numpy_tensor(x):
return cls.numpy(x, *args)
elif is_torch_tensor(x):
return cls.torch(x, *args)
else:
raise NotImplementedError("Unsupported tensor type")
@classmethod
def numpy(cls, *args):
raise NotImplementedError(f"Numpy reference for {cls.__name__[:-4]} is not implemented.")
@classmethod
def torch(cls, *args):
raise NotImplementedError(f"PyTorch reference for {cls.__name__[:-4]} is not implemented.")
##############################################################################
# identity operator
class identityMeta(ActivationMeta):
@classmethod
def numpy(cls, x):
return x
@classmethod
def torch(cls, x):
return x
class identity(ActivationFunctor, metaclass=identityMeta):
binding_type = ActivationOp.Identity
##############################################################################
# ReLu operator
class reluMeta(ActivationMeta):
@classmethod
def numpy(cls, x):
return np.where(x > 0, x, 0)
@classmethod
def torch(cls, x):
return F.relu(x)
class relu(ActivationFunctor, metaclass=reluMeta):
binding_type = ActivationOp.ReLU
##############################################################################
# Leaky ReLu operator
class leakyReLUMeta(ActivationMeta):
@classmethod
def numpy(cls, x, leaky_alpha):
return np.maximum(x, 0) + np.minimum(x, 0) * leaky_alpha
@classmethod
def torch(cls, x, leaky_alpha):
return F.leaky_relu(x, leaky_alpha)
class leaky_relu(ActivationFunctor, metaclass=leakyReLUMeta):
binding_type = ActivationOp.LeakyReLU
@staticmethod
def epilogue_output_op(element_epilogue):
c_element_epilogue = dtype2ctype[element_epilogue]
class _EpilogueOutputOpParams(ctypes.Structure):
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p),
("leaky_alpha", c_element_epilogue)
]
def __init__(self, alpha, beta, leaky_alpha=0.2, *args) -> None:
self.alpha = to_ctype_value(alpha, element_epilogue)
self.beta = to_ctype_value(beta, element_epilogue)
self.alpha_ptr = 0
self.beta_ptr = 0
self.leaky_alpha = to_ctype_value(leaky_alpha, element_epilogue)
return _EpilogueOutputOpParams
##############################################################################
# Tanh operator
class tanhMeta(ActivationMeta):
@classmethod
def numpy(cls, x):
return np.tanh(x)
@classmethod
def torch(cls, x):
return torch.tanh(x)
class tanh(ActivationFunctor, metaclass=tanhMeta):
binding_type = ActivationOp.Tanh
##############################################################################
# Sigmoid operator
class sigmoidMeta(ActivationMeta):
@classmethod
def numpy(cls, x):
return 1.0 / (1.0 + np.exp(-x))
@classmethod
def torch(cls, x):
return F.sigmoid(x)
class sigmoid(ActivationFunctor, metaclass=sigmoidMeta):
binding_type = ActivationOp.Sigmoid
##############################################################################
# SiLu operator
class siluMeta(ActivationMeta):
    @classmethod
    def numpy(cls, x):
        return x * sigmoidMeta.numpy(x)
    @classmethod
    def torch(cls, x):
        return F.silu(x)
class silu(ActivationFunctor, metaclass=siluMeta):
binding_type = ActivationOp.SiLU
##############################################################################
# Hardswish operator
class hardswishMeta(ActivationMeta):
@classmethod
def numpy(cls, x):
relu6 = np.minimum(np.maximum(x + 3.0, 0), 6.0)
return x * relu6 / 6.0
@classmethod
def torch(cls, x):
return F.hardswish(x)
class hardswish(ActivationFunctor, metaclass=hardswishMeta):
binding_type = ActivationOp.HardSwish
##############################################################################
# GELU operator
class geluMeta(ActivationMeta):
@classmethod
def numpy(cls, x):
return 0.5 * x * (1 + erf(x / np.sqrt(2.0)))
@classmethod
def torch(cls, x):
return F.gelu(x)
class gelu(ActivationFunctor, metaclass=geluMeta):
binding_type = ActivationOp.Gelu
| python/cutlass/backend/epilogue.py/0 | {
"file_path": "python/cutlass/backend/epilogue.py",
"repo_id": "python",
"token_count": 7063
} | 50 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Layout manipulation nodes and implementations
The layout Nodes change the layout of intermediate nodes in epilogue visitor graph
"""
from copy import deepcopy
from cutlass_library import LayoutType
from pycute import product, flatten
import cutlass
from cutlass.backend.evt.ir.layout_algorithm import _list_to_tuple, _tuple_to_list
from cutlass.backend.evt.ir.node import NodeBase
from cutlass.backend.evt.ir.tensor import Tensor
class PermutationImpl:
"""
Detailed implementation and helper functions for permutation
"""
def __init__(self, node) -> None:
assert "indices" in node.kwargs.keys()
self.indices = list(node.kwargs["indices"])
self.inverse_indices = self.get_inverse_indices(self.indices)
def get_inverse_impl(self):
inverse_impl = deepcopy(self)
inverse_impl.indices = self.inverse_indices
inverse_impl.inverse_indices = self.indices
return inverse_impl
def update(self, shape):
num_dim = len(shape)
indices = self.indices
num_old_dim = len(indices)
# Add offset
for i, idx in enumerate(indices):
indices[i] = idx + num_dim - num_old_dim
# Add broadcast dims
for i in range(num_dim - num_old_dim):
indices = [i,] + indices
self.indices = indices
self.inverse_indices = self.get_inverse_indices(self.indices)
def get_inverse_indices(self, indices):
"""
Get the indices for inverse permutation
"""
num_dim = len(indices)
inverse_indices = [0] * num_dim
for i in range(num_dim):
inverse_indices[indices[i]] = i
return inverse_indices
def shape_propagation(self, input_node_meta):
input_shape = input_node_meta.tensor.shape
output_shape = tuple([input_shape[idx] for idx in self.indices])
return output_shape
def broadcast(self, shape, node_meta: NodeBase):
"""
Broadcast the inputs based on current shape
"""
self.update(shape)
inverse_shape = tuple([shape[idx] for idx in self.inverse_indices])
node_meta.tensor.broadcast(inverse_shape)
def apply_to_user(self, usr_meta: NodeBase):
"""
Propagate the permutation to the users of the current nodes
"""
usr_meta.tensor.permute(self.inverse_indices)
if hasattr(usr_meta, "store_tensor"):
if usr_meta.store_tensor is not None:
usr_meta.store_tensor.permute(self.inverse_indices)
def apply_to_input(self, input_meta: NodeBase):
"""
Propagate the permutation to inputs of the current nodes
"""
input_meta.tensor.permute(self.indices)
if hasattr(input_meta, "store_tensor"):
if input_meta.store_tensor is not None:
input_meta.store_tensor.permute(self.indices)
class ReshapeImpl:
"""
Detailed implementation and helper functions for reshape
"""
def __init__(self, node) -> None:
self.node = node
assert "new_shape" in node.kwargs.keys()
self.output_shape = _list_to_tuple(node.kwargs["new_shape"])
def get_inverse_impl(self):
inverse_impl = deepcopy(self)
inverse_impl.output_shape = self.input_shape
inverse_impl.input_shape = self.output_shape
return inverse_impl
def shape_propagation(self, input_node_meta):
self.input_shape = input_node_meta.tensor.shape
return _list_to_tuple(self.output_shape)
def broadcast(self, shape, node_meta: NodeBase):
"""
Broadcast the inputs based on current shape.
"""
# Step 1: infer split
flatten_split_shape = self.infer_split(flatten(self.input_shape), flatten(self.output_shape))
split_input_shape = self.infer_merge(flatten_split_shape, self.input_shape)
split_output_shape = self.infer_merge(flatten_split_shape, self.output_shape)
# broadcast shape -> split_output_shape -> flatten_split_shape
if len(shape) - len(split_output_shape) > 0:
for _ in range(len(shape) - len(split_output_shape)):
split_output_shape = [1,] + split_output_shape
flatten_split_shape = [1,] + flatten_split_shape
split_input_shape = [1,] + split_input_shape
broadcast_factor = []
for dim, old_dim in zip(shape, split_output_shape):
if not isinstance(dim, list):
dim = [dim,]
if not isinstance(old_dim, list):
old_dim = [old_dim,]
if product(tuple(dim)) == product(tuple(old_dim)):
broadcast_factor += [1] * len(old_dim)
elif product(tuple(old_dim)) == 1:
assert len(dim) == 1
broadcast_factor.append(dim[0])
else:
raise NotImplementedError(f"Invalid Broadcast: {old_dim} -> {dim}")
# flatten_split_shape -> split_input_shape
factor_idx = 0
broadcast_split_input_shape = []
for dim in split_input_shape:
if isinstance(dim, list):
new_dim = []
for d in dim:
new_dim.append(d * broadcast_factor[factor_idx])
factor_idx += 1
broadcast_split_input_shape.append(new_dim)
else:
broadcast_split_input_shape.append(dim * broadcast_factor[factor_idx])
factor_idx += 1
broadcast_split_input_shape = _list_to_tuple(broadcast_split_input_shape)
node_meta.tensor.reshape(_list_to_tuple(split_input_shape))
node_meta.tensor.broadcast(broadcast_split_input_shape)
# Last reshape op to clean up
broadcast_input_shape = tuple([product(dim) for dim in broadcast_split_input_shape])
node_meta.tensor.reshape(broadcast_input_shape)
# Update the input shape and output shape
self.input_shape = _list_to_tuple(node_meta.tensor.shape)
self.output_shape = _list_to_tuple(shape)
def apply_to_user(self, user_meta: NodeBase):
"""
Propagate the reshape to user nodes
"""
user_meta.tensor.reshape(tuple(self.input_shape))
if hasattr(user_meta, "store_tensor"):
if user_meta.store_tensor is not None:
user_meta.store_tensor.reshape(tuple(self.input_shape))
def apply_to_input(self, input_meta: NodeBase):
"""
Propagate the reshape to input nodes
"""
input_meta.tensor.reshape(tuple(self.output_shape))
if hasattr(input_meta, "store_tensor"):
if input_meta.store_tensor is not None:
input_meta.store_tensor.reshape(tuple(self.output_shape))
#
# Helper functions
#
def infer_split(self, input_shape, output_shape):
"""
Infer the flatten splitted shape that can be merged to both input_shape and output_shape
"""
input_shape = _tuple_to_list(input_shape)
output_shape = _tuple_to_list(output_shape)
if len(input_shape) == 0 and len(output_shape) == 0:
return []
if len(input_shape) == 0:
if product(tuple(output_shape)) != 1:
raise ValueError("Invalid reshape size")
else:
return output_shape
if len(output_shape) == 0:
if product(tuple(input_shape)) != 1:
raise ValueError("Invalid reshape size")
else:
return input_shape
# This is done recursively by only process the last dimension at each time
old_dim = input_shape[-1]
new_dim = output_shape[-1]
# Exact match
if old_dim == new_dim:
return self.infer_split(input_shape[:-1], output_shape[:-1]) + [new_dim,]
# Needs split
if old_dim > new_dim and old_dim % new_dim == 0:
residual = old_dim // new_dim
return self.infer_split(input_shape[:-1] + [residual,], output_shape[:-1]) + [new_dim,]
# Needs merge
if old_dim < new_dim and new_dim % old_dim == 0:
residual = new_dim // old_dim
return self.infer_split(input_shape[:-1], output_shape[:-1] + [residual,]) + [old_dim,]
raise NotImplementedError(f"Unsupported split: {input_shape} -> {output_shape}")
def infer_merge(self, flatten_shape, shape):
flatten_shape = _tuple_to_list(flatten_shape)
shape = _tuple_to_list(shape)
idx_flat = len(flatten_shape) - 1
merged_shape = []
for dim in reversed(shape):
# Exact match
if dim == flatten_shape[idx_flat]:
merged_shape.append(dim)
idx_flat -= 1
# need group
elif dim > flatten_shape[idx_flat] and dim % flatten_shape[idx_flat] == 0:
residual = dim
group = []
while(residual > 1):
group.append(flatten_shape[idx_flat])
residual = residual // flatten_shape[idx_flat]
idx_flat -= 1
merged_shape.append(group[::-1])
else:
raise NotImplementedError(f"Unsupported merge: {flatten_shape} -> {shape}")
return merged_shape[::-1]
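    # Example (sketch): infer_merge([3, 2, 4], [6, 4]) returns [[3, 2], 4],
    # grouping the flat modes that merge into each mode of the target shape.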
class LayoutNode(NodeBase):
"""
Layout manipulation nodes
"""
fn_to_impl = {
"permute": PermutationImpl,
"reshape": ReshapeImpl
}
def __init__(self, name: str, fn, kwargs: dict) -> None:
super().__init__(name)
self.op = "layout"
self.fn = fn
self.kwargs = kwargs
self.underlying_impl = self.fn_to_impl[self.fn.__name__](self)
def get_inverse_node(self):
inverse_node = deepcopy(self)
inverse_node.underlying_impl = self.underlying_impl.get_inverse_impl()
return inverse_node
def shape_propagation(self, input_node_metas):
if self._tensor is not None:
return
assert len(input_node_metas) == 1, "Layout node can only have one input node"
output_shape = self.underlying_impl.shape_propagation(input_node_metas[0])
self._tensor = Tensor(
element=self.element_output,
shape=output_shape, layout_tag=LayoutType.RowMajor
)
return super().shape_propagation(input_node_metas)
def type_propagation(self, input_node_metas: 'list[NodeBase]'):
"""
The store nodes has element_output = element_input
"""
assert len(input_node_metas) == 1, "Layout node can only have one input node"
self.element_output = input_node_metas[0].element_output
def broadcast_propagation(self, input_node_metas: 'list[NodeBase]'):
"""
Propagate the broadcast in the reversed topological order
"""
if self.tensor is None:
raise RuntimeError(f"The tensor of node {self.name} is unknown.")
shape = self.tensor.shape
for child in input_node_metas:
self.underlying_impl.broadcast(shape, child)
def apply_to_user(self, usr_meta: NodeBase):
"""
Propagate the permutation to user nodes
"""
self.underlying_impl.apply_to_user(usr_meta)
def apply_to_input(self, input_meta: NodeBase):
"""
Propagate the permutation to input nodes
"""
self.underlying_impl.apply_to_input(input_meta)
| python/cutlass/backend/evt/ir/layout_nodes.py/0 | {
"file_path": "python/cutlass/backend/evt/ir/layout_nodes.py",
"repo_id": "python",
"token_count": 5691
} | 51 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Compute the shared memory size in bytes
"""
import cutlass_library
from pycute import shape_div, product
import cutlass
from cutlass.backend.evt.ir import TopoVisitorNode, DAGIR
from cutlass.backend.library import DataTypeSize
class GetSmemSize:
"""
Get the size in byte of shared memory used by the kernel
"""
def __init__(self, dag_ir: DAGIR) -> None:
self.dag_ir = dag_ir
self.cc = self.dag_ir.cc
#
# Sm90 epilogue specific
#
def sm90_epilogue_tile(self, tile_description):
# Get the epilogue tile size
schedule = tile_description.epilogue_schedule
if schedule == cutlass_library.EpilogueScheduleType.TmaWarpSpecialized:
epilogue_tile_mn = (64, 32)
elif schedule == cutlass_library.EpilogueScheduleType.TmaWarpSpecializedCooperative:
if tile_description.threadblock_shape[0] >= 128:
epilogue_tile_mn = (128, 32)
else:
epilogue_tile_mn = (64, 32)
else:
raise NotImplementedError(f"Unsupported schedule: {schedule}")
# Get the pipeline stages
stages_d = 2
epi_tiles = product(shape_div(tuple(tile_description.threadblock_shape)[:2], epilogue_tile_mn))
if self.dag_ir.has_node("C"):
element_c = self.dag_ir.get_node_meta("C").element
else:
element_c = None
element_d = self.dag_ir.get_node_meta("D").element
if element_c == element_d:
reuse_smem_c = True
else:
reuse_smem_c = False
stages_c = max(epi_tiles, stages_d + 1) if reuse_smem_c else epi_tiles
# Record the epilogue tile
self.cta_tile_mnk = tuple(tile_description.threadblock_shape)
self.epilogue_tile_mn = epilogue_tile_mn
self.epi_tiles = epi_tiles
self.stages_c = stages_c
self.stages_d = stages_d
self.reuse_smem_c = reuse_smem_c
self.element_c = element_c
self.element_d = element_d
self.is_source_supported = element_c is not None
def sm90_epilogue_smem_size(self, tile_description):
"""
Compute the shared memory size of sm90 collective epilogue
"""
self.sm90_epilogue_tile(tile_description)
# Get the Fusion Storage
nodes = self.dag_ir.nodes_topological_order()
self.smem_types = {}
for node in nodes:
meta = self.dag_ir.get_node_meta(node)
if not meta.disabled:
self.smem_types[node] = meta.underlying_impl.get_smem_size(
self.cta_tile_mnk, self.epilogue_tile_mn,
self.stages_c, self.stages_d, self.epi_tiles)
if node == "D":
continue
if isinstance(meta, TopoVisitorNode):
self.get_dag_smem_type(node)
else:
self.get_evt_smem_type(node)
thread_smem_size = self.smem_types[self.dag_ir.get_all_inputs("D")[0]][0]
# Get the Tensor Storage
tensors = []
if self.is_source_supported:
smem_C = DataTypeSize[self.element_c] * product(self.epilogue_tile_mn) * self.stages_c // 8
tensors.append((smem_C, 128))
else:
tensors.append((0, 1))
if self.reuse_smem_c:
tensors.append((0, 128))
else:
smem_D = DataTypeSize[self.element_d] * product(self.epilogue_tile_mn) * self.stages_d // 8
tensors.append((smem_D, 128))
tensors.append((thread_smem_size, 128))
tensor_smem_size = self.get_struct_size(tensors)
# Get pipeline storage size
        # sizeof(uint64_t) * stages_c * 2, with the alignment of uint64_t
        # 2 is for FullBarrier and EmptyBarrier
pipeline_smem_size = (8 * self.stages_c * 2, 8)
# get SharedStorage size
smem_size = self.get_struct_size([tensor_smem_size, pipeline_smem_size])
return smem_size[0]
def __call__(self, tile_description):
return getattr(self, f"sm{self.cc}_epilogue_smem_size")(tile_description)
#
# Helper functions
#
@staticmethod
def get_visitor_size(members: list, ebo: bool):
"""
Get the size of struct in bytes
"""
offset = 0
max_alignment = 1
if len(members) > 0:
# Get alignment
for _, alignment in members:
max_alignment = max(max_alignment, alignment)
for type_size, _ in members:
if type_size != 0:
offset = ((offset + max_alignment - 1) // max_alignment) * max_alignment
if type_size == 0 and not ebo:
offset += 1
else:
offset += type_size
offset = ((offset + max_alignment - 1) // max_alignment) * max_alignment
return (offset, max_alignment)
else:
# Struct size is at least 1
return (1, 1)
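    # Example (sketch): two members with (size, alignment) of (2, 2) and (4, 4) pack into
    # an 8-byte struct aligned to 4 bytes, i.e. get_visitor_size([(2, 2), (4, 4)], False) == (8, 4).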
def get_struct_size(self, members: list):
"""
Get the size of struct in bytes
"""
return self.get_visitor_size(members, False)
def get_evt_smem_type(self, node):
# Sort the input nodes by edge weight
input_types = [self.smem_types[child] for child in self.dag_ir.get_all_inputs(node)]
input_types.append(self.smem_types[node])
if len(input_types) > 1:
ebo = len(input_types) > 4
self.smem_types[node] = self.get_visitor_size(input_types, ebo)
def get_dag_smem_type(self, node):
meta = self.dag_ir.get_node_meta(node)
subgraph = meta.subgraph
subgraph_nodes = subgraph.nodes_topological_order()
# Visit the unvisited nodes in subgraph
for n in subgraph_nodes:
m = subgraph.get_node_meta(n)
if m.disabled:
continue
else:
self.smem_types[n] = m.underlying_impl.get_smem_size(
self.cta_tile_mnk, self.epilogue_tile_mn,
self.stages_c, self.stages_d, self.epi_tiles)
input_types = [self.smem_types[child] for child in subgraph_nodes[:-1]]
if len(input_types) > 0:
ebo = len(input_types) > 4
self.smem_types[node] = self.get_visitor_size(input_types, ebo)
| python/cutlass/backend/evt/passes/smem_size_calculator.py/0 | {
"file_path": "python/cutlass/backend/evt/passes/smem_size_calculator.py",
"repo_id": "python",
"token_count": 3585
} | 52 |
#################################################################################################
#
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for emitting CUTLASS >= 3 convolution kernels
"""
import enum
import os.path
import shutil
import logging
from string import Template
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
except ImportError:
from library import *
_LOGGER = logging.getLogger(__name__)
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitConv3xInstance:
def __init__(self):
_LOGGER.debug("*** EmitConv3xInstance::__init__")
# Define epilogue type first, so that the mainloop type
# can use it with StageCountAutoCarveout.
self.template = """
// CUTLASS >= 3 convolution ${conv_kind_name} kernel instance "${operation_name}"
using ${operation_name}_epilogue =
typename cutlass::epilogue::collective::CollectiveBuilder<
${arch},
${opcode_class_epi},
${tile_shape}, // tile shape
${cluster_shape}, // cluster shape
${epi_tile_mn},
${element_accumulator},
${element_compute},
${element_c}, ${layout_c}, 128 / cute::sizeof_bits_v<${element_c}>,
${element_d}, ${layout_d}, 128 / cute::sizeof_bits_v<${element_d}>,
${epilogue_schedule}
// , class FusionOpOrCallbacks = cutlass::epilogue::fusion::LinearCombination<ElementD,ElementCompute>
>::CollectiveOp;
using ${operation_name}_mainloop =
typename cutlass::conv::collective::CollectiveBuilder<
${arch},
${opcode_class_main},
${conv_kind}, // kFprop, kDgrad, or kWgrad
${element_a}, ${layout_a}, 128 / cute::sizeof_bits_v<${element_a}>,
${element_b}, ${layout_b}, 128 / cute::sizeof_bits_v<${element_b}>,
${element_accumulator},
${tile_shape}, // tile shape
${cluster_shape}, // cluster shape
${stages},
${kernel_schedule}
>::CollectiveOp;
// Unit tests call this "ConvKernel".
// Conv operator ${operation_name}
using ${operation_name}_base = cutlass::conv::kernel::ConvUniversal<
${operation_name}_mainloop,
${operation_name}_epilogue,
${tile_scheduler}
>;
"""
def arch_number_to_type(self, arch: int) -> str:
return f"cutlass::arch::Sm{arch}"
def tile_shape(self, operation) -> str:
# For all three kinds of convolutions, the tile shape's K mode
# differs from GEMM in that needs to be wrapped in a Shape.
# For Wgrad convolutions specifically,
# the N tile shape also needs to be wrapped in a Shape.
m_template = 'cute::_${tile_shape_m}'
if operation.conv_kind == ConvKind.Wgrad:
n_template = 'cute::Shape<cute::_${tile_shape_n}>'
else:
n_template = 'cute::_${tile_shape_n}'
k_template = 'cute::Shape<cute::_${tile_shape_k}>'
tile_shape_template = f'cute::Shape<{m_template}, {n_template}, {k_template}>'
values = {
'tile_shape_m': operation.tile_description.tile_shape[0],
'tile_shape_n': operation.tile_description.tile_shape[1],
'tile_shape_k': operation.tile_description.tile_shape[2]
}
return Template(tile_shape_template).substitute(values)
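    # Example (sketch): a Fprop operation with tile_shape [128, 128, 64] emits
    # "cute::Shape<cute::_128, cute::_128, cute::Shape<cute::_64>>"; for Wgrad the N mode is
    # also wrapped, giving "cute::Shape<cute::_128, cute::Shape<cute::_128>, cute::Shape<cute::_64>>".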
def cluster_shape(self, operation) -> str:
m_template = 'cute::_${cluster_shape_m}'
n_template = 'cute::_${cluster_shape_n}'
k_template = 'cute::_${cluster_shape_k}'
cluster_shape_template = f'cute::Shape<{m_template}, {n_template}, {k_template}>'
values = {
'cluster_shape_m': operation.tile_description.cluster_shape[0],
'cluster_shape_n': operation.tile_description.cluster_shape[1],
'cluster_shape_k': operation.tile_description.cluster_shape[2],
}
return Template(cluster_shape_template).substitute(values)
def stage_count(self, operation) -> str:
# stages == 0 tells builder to pick the number of stages automatically
namespace_prefix = 'cutlass::conv::collective::'
if operation.tile_description.stages > 0:
return f"{namespace_prefix}StageCount<{str(operation.tile_description.stages)}>"
else:
return f"{namespace_prefix}StageCountAutoCarveout<sizeof(typename {operation.procedural_name()}_epilogue::SharedStorage)>"
def emit(self, operation) -> str:
_LOGGER.debug("*** EmitConv3xInstance::emit")
_LOGGER.debug("*** operation: procedural_name()=" + operation.procedural_name())
# Identify the operation as CUTLASS 3 by its is_3x field
if (not hasattr(operation, 'is_3x')) or (not operation.is_3x):
raise RuntimeError("operation must be a CUTLASS 3 operation")
epi_tile_mn = "cutlass::epilogue::collective::EpilogueTileAuto"
opcode_class_main = OpcodeClassTag[operation.tile_description.math_instruction.opcode_class]
opcode_class_epi = opcode_class_main
tile_shape = operation.tile_description.tile_shape
warp_count = operation.tile_description.warp_count
epilogue_schedule = EpilogueScheduleTag[operation.epilogue_schedule]
# KernelScheduleTag and TileSchedulerTag both hard-code the
# namespace qualification of KernelScheduleAuto as
# "cutlass::gemm::collective::" (unless the tag is 'void').
#
# For TileSchedulerTag, this namespace is fine, since CUTLASS 3
# convolutions use the same tile schedulers (from the same
# cutlass::gemm::collective namespace) as GEMMs.
kernel_schedule = KernelScheduleTag[operation.kernel_schedule].replace('gemm::', 'conv::')
tile_scheduler = TileSchedulerTag[operation.tile_scheduler]
opcode_class = OpcodeClassTag[operation.tile_description.math_instruction.opcode_class]
values = {
'operation_name': operation.procedural_name(),
'conv_kind': ConvKindTag[operation.conv_kind],
'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'align_a': int(operation.A.alignment),
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'align_b': int(operation.B.alignment),
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'align_c': int(operation.C.alignment),
'element_d': DataTypeTag[operation.D.element],
'layout_d': LayoutTag[operation.D.layout],
'align_d': int(operation.D.alignment),
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': opcode_class,
'arch': self.arch_number_to_type(operation.arch),
'tile_shape': self.tile_shape(operation),
'cluster_shape': self.cluster_shape(operation),
'opcode_class_epi': opcode_class_epi,
'opcode_class_main': opcode_class_main,
'epi_tile_mn': epi_tile_mn,
'stages': self.stage_count(operation),
'kernel_schedule': kernel_schedule,
'epilogue_schedule': epilogue_schedule,
'tile_scheduler': tile_scheduler,
'element_compute': DataTypeTag[operation.element_compute]
}
return Template(self.template).substitute(values)
class EmitConv3xIncludes:
def __init__(self):
_LOGGER.debug("*** EmitConv3xIncludes::__init__")
self.includes = ['conv_operation_3x.hpp',
'cutlass/conv/device/conv_universal_adapter.hpp',
'cutlass/conv/kernel/conv_universal.hpp',
'cutlass/conv/collective/collective_builder.hpp',
'cutlass/epilogue/collective/collective_builder.hpp']
def emit(self, operation) -> str:
_LOGGER.debug("*** EmitConv3xIncludes::emit")
return '\n'.join(f"#include \"{incl}\"" for incl in self.includes) + \
"\n\n///////////////////////////////////////////////////////////////////////////////////////////////////"
| python/cutlass_library/conv3x_emitter.py/0 | {
"file_path": "python/cutlass_library/conv3x_emitter.py",
"repo_id": "python",
"token_count": 3573
} | 53 |
/*
* basic.css
* ~~~~~~~~~
*
* Sphinx stylesheet -- basic theme.
*
* :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
/* -- main layout ----------------------------------------------------------- */
div.clearer {
clear: both;
}
div.section::after {
display: block;
content: '';
clear: left;
}
/* -- relbar ---------------------------------------------------------------- */
div.related {
width: 100%;
font-size: 90%;
}
div.related h3 {
display: none;
}
div.related ul {
margin: 0;
padding: 0 0 0 10px;
list-style: none;
}
div.related li {
display: inline;
}
div.related li.right {
float: right;
margin-right: 5px;
}
/* -- sidebar --------------------------------------------------------------- */
div.sphinxsidebarwrapper {
padding: 10px 5px 0 10px;
}
div.sphinxsidebar {
float: left;
width: 230px;
margin-left: -100%;
font-size: 90%;
word-wrap: break-word;
overflow-wrap : break-word;
}
div.sphinxsidebar ul {
list-style: none;
}
div.sphinxsidebar ul ul,
div.sphinxsidebar ul.want-points {
margin-left: 20px;
list-style: square;
}
div.sphinxsidebar ul ul {
margin-top: 0;
margin-bottom: 0;
}
div.sphinxsidebar form {
margin-top: 10px;
}
div.sphinxsidebar input {
border: 1px solid #98dbcc;
font-family: sans-serif;
font-size: 1em;
}
div.sphinxsidebar #searchbox form.search {
overflow: hidden;
}
div.sphinxsidebar #searchbox input[type="text"] {
float: left;
width: 80%;
padding: 0.25em;
box-sizing: border-box;
}
div.sphinxsidebar #searchbox input[type="submit"] {
float: left;
width: 20%;
border-left: none;
padding: 0.25em;
box-sizing: border-box;
}
img {
border: 0;
max-width: 100%;
}
/* -- search page ----------------------------------------------------------- */
ul.search {
margin: 10px 0 0 20px;
padding: 0;
}
ul.search li {
padding: 5px 0 5px 20px;
background-image: url(file.png);
background-repeat: no-repeat;
background-position: 0 7px;
}
ul.search li a {
font-weight: bold;
}
ul.search li p.context {
color: #888;
margin: 2px 0 0 30px;
text-align: left;
}
ul.keywordmatches li.goodmatch a {
font-weight: bold;
}
/* -- index page ------------------------------------------------------------ */
table.contentstable {
width: 90%;
margin-left: auto;
margin-right: auto;
}
table.contentstable p.biglink {
line-height: 150%;
}
a.biglink {
font-size: 1.3em;
}
span.linkdescr {
font-style: italic;
padding-top: 5px;
font-size: 90%;
}
/* -- general index --------------------------------------------------------- */
table.indextable {
width: 100%;
}
table.indextable td {
text-align: left;
vertical-align: top;
}
table.indextable ul {
margin-top: 0;
margin-bottom: 0;
list-style-type: none;
}
table.indextable > tbody > tr > td > ul {
padding-left: 0em;
}
table.indextable tr.pcap {
height: 10px;
}
table.indextable tr.cap {
margin-top: 10px;
background-color: #f2f2f2;
}
img.toggler {
margin-right: 3px;
margin-top: 3px;
cursor: pointer;
}
div.modindex-jumpbox {
border-top: 1px solid #ddd;
border-bottom: 1px solid #ddd;
margin: 1em 0 1em 0;
padding: 0.4em;
}
div.genindex-jumpbox {
border-top: 1px solid #ddd;
border-bottom: 1px solid #ddd;
margin: 1em 0 1em 0;
padding: 0.4em;
}
/* -- domain module index --------------------------------------------------- */
table.modindextable td {
padding: 2px;
border-collapse: collapse;
}
/* -- general body styles --------------------------------------------------- */
div.body {
min-width: 360px;
max-width: 800px;
}
div.body p, div.body dd, div.body li, div.body blockquote {
-moz-hyphens: auto;
-ms-hyphens: auto;
-webkit-hyphens: auto;
hyphens: auto;
}
a.headerlink {
visibility: hidden;
}
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
h4:hover > a.headerlink,
h5:hover > a.headerlink,
h6:hover > a.headerlink,
dt:hover > a.headerlink,
caption:hover > a.headerlink,
p.caption:hover > a.headerlink,
div.code-block-caption:hover > a.headerlink {
visibility: visible;
}
div.body p.caption {
text-align: inherit;
}
div.body td {
text-align: left;
}
.first {
margin-top: 0 !important;
}
p.rubric {
margin-top: 30px;
font-weight: bold;
}
img.align-left, figure.align-left, .figure.align-left, object.align-left {
clear: left;
float: left;
margin-right: 1em;
}
img.align-right, figure.align-right, .figure.align-right, object.align-right {
clear: right;
float: right;
margin-left: 1em;
}
img.align-center, figure.align-center, .figure.align-center, object.align-center {
display: block;
margin-left: auto;
margin-right: auto;
}
img.align-default, figure.align-default, .figure.align-default {
display: block;
margin-left: auto;
margin-right: auto;
}
.align-left {
text-align: left;
}
.align-center {
text-align: center;
}
.align-default {
text-align: center;
}
.align-right {
text-align: right;
}
/* -- sidebars -------------------------------------------------------------- */
div.sidebar,
aside.sidebar {
margin: 0 0 0.5em 1em;
border: 1px solid #ddb;
padding: 7px;
background-color: #ffe;
width: 40%;
float: right;
clear: right;
overflow-x: auto;
}
p.sidebar-title {
font-weight: bold;
}
nav.contents,
aside.topic,
div.admonition, div.topic, blockquote {
clear: left;
}
/* -- topics ---------------------------------------------------------------- */
nav.contents,
aside.topic,
div.topic {
border: 1px solid #ccc;
padding: 7px;
margin: 10px 0 10px 0;
}
p.topic-title {
font-size: 1.1em;
font-weight: bold;
margin-top: 10px;
}
/* -- admonitions ----------------------------------------------------------- */
div.admonition {
margin-top: 10px;
margin-bottom: 10px;
padding: 7px;
}
div.admonition dt {
font-weight: bold;
}
p.admonition-title {
margin: 0px 10px 5px 0px;
font-weight: bold;
}
div.body p.centered {
text-align: center;
margin-top: 25px;
}
/* -- content of sidebars/topics/admonitions -------------------------------- */
div.sidebar > :last-child,
aside.sidebar > :last-child,
nav.contents > :last-child,
aside.topic > :last-child,
div.topic > :last-child,
div.admonition > :last-child {
margin-bottom: 0;
}
div.sidebar::after,
aside.sidebar::after,
nav.contents::after,
aside.topic::after,
div.topic::after,
div.admonition::after,
blockquote::after {
display: block;
content: '';
clear: both;
}
/* -- tables ---------------------------------------------------------------- */
table.docutils {
margin-top: 10px;
margin-bottom: 10px;
border: 0;
border-collapse: collapse;
}
table.align-center {
margin-left: auto;
margin-right: auto;
}
table.align-default {
margin-left: auto;
margin-right: auto;
}
table caption span.caption-number {
font-style: italic;
}
table caption span.caption-text {
}
table.docutils td, table.docutils th {
padding: 1px 8px 1px 5px;
border-top: 0;
border-left: 0;
border-right: 0;
border-bottom: 1px solid #aaa;
}
th {
text-align: left;
padding-right: 5px;
}
table.citation {
border-left: solid 1px gray;
margin-left: 1px;
}
table.citation td {
border-bottom: none;
}
th > :first-child,
td > :first-child {
margin-top: 0px;
}
th > :last-child,
td > :last-child {
margin-bottom: 0px;
}
/* -- figures --------------------------------------------------------------- */
div.figure, figure {
margin: 0.5em;
padding: 0.5em;
}
div.figure p.caption, figcaption {
padding: 0.3em;
}
div.figure p.caption span.caption-number,
figcaption span.caption-number {
font-style: italic;
}
div.figure p.caption span.caption-text,
figcaption span.caption-text {
}
/* -- field list styles ----------------------------------------------------- */
table.field-list td, table.field-list th {
border: 0 !important;
}
.field-list ul {
margin: 0;
padding-left: 1em;
}
.field-list p {
margin: 0;
}
.field-name {
-moz-hyphens: manual;
-ms-hyphens: manual;
-webkit-hyphens: manual;
hyphens: manual;
}
/* -- hlist styles ---------------------------------------------------------- */
table.hlist {
margin: 1em 0;
}
table.hlist td {
vertical-align: top;
}
/* -- object description styles --------------------------------------------- */
.sig {
font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
}
.sig-name, code.descname {
background-color: transparent;
font-weight: bold;
}
.sig-name {
font-size: 1.1em;
}
code.descname {
font-size: 1.2em;
}
.sig-prename, code.descclassname {
background-color: transparent;
}
.optional {
font-size: 1.3em;
}
.sig-paren {
font-size: larger;
}
.sig-param.n {
font-style: italic;
}
/* C++ specific styling */
.sig-inline.c-texpr,
.sig-inline.cpp-texpr {
font-family: unset;
}
.sig.c .k, .sig.c .kt,
.sig.cpp .k, .sig.cpp .kt {
color: #0033B3;
}
.sig.c .m,
.sig.cpp .m {
color: #1750EB;
}
.sig.c .s, .sig.c .sc,
.sig.cpp .s, .sig.cpp .sc {
color: #067D17;
}
/* -- other body styles ----------------------------------------------------- */
ol.arabic {
list-style: decimal;
}
ol.loweralpha {
list-style: lower-alpha;
}
ol.upperalpha {
list-style: upper-alpha;
}
ol.lowerroman {
list-style: lower-roman;
}
ol.upperroman {
list-style: upper-roman;
}
:not(li) > ol > li:first-child > :first-child,
:not(li) > ul > li:first-child > :first-child {
margin-top: 0px;
}
:not(li) > ol > li:last-child > :last-child,
:not(li) > ul > li:last-child > :last-child {
margin-bottom: 0px;
}
ol.simple ol p,
ol.simple ul p,
ul.simple ol p,
ul.simple ul p {
margin-top: 0;
}
ol.simple > li:not(:first-child) > p,
ul.simple > li:not(:first-child) > p {
margin-top: 0;
}
ol.simple p,
ul.simple p {
margin-bottom: 0;
}
aside.footnote > span,
div.citation > span {
float: left;
}
aside.footnote > span:last-of-type,
div.citation > span:last-of-type {
padding-right: 0.5em;
}
aside.footnote > p {
margin-left: 2em;
}
div.citation > p {
margin-left: 4em;
}
aside.footnote > p:last-of-type,
div.citation > p:last-of-type {
margin-bottom: 0em;
}
aside.footnote > p:last-of-type:after,
div.citation > p:last-of-type:after {
content: "";
clear: both;
}
dl.field-list {
display: grid;
grid-template-columns: fit-content(30%) auto;
}
dl.field-list > dt {
font-weight: bold;
word-break: break-word;
padding-left: 0.5em;
padding-right: 5px;
}
dl.field-list > dd {
padding-left: 0.5em;
margin-top: 0em;
margin-left: 0em;
margin-bottom: 0em;
}
dl {
margin-bottom: 15px;
}
dd > :first-child {
margin-top: 0px;
}
dd ul, dd table {
margin-bottom: 10px;
}
dd {
margin-top: 3px;
margin-bottom: 10px;
margin-left: 30px;
}
dl > dd:last-child,
dl > dd:last-child > :last-child {
margin-bottom: 0;
}
dt:target, span.highlighted {
background-color: #fbe54e;
}
rect.highlighted {
fill: #fbe54e;
}
dl.glossary dt {
font-weight: bold;
font-size: 1.1em;
}
.versionmodified {
font-style: italic;
}
.system-message {
background-color: #fda;
padding: 5px;
border: 3px solid red;
}
.footnote:target {
background-color: #ffa;
}
.line-block {
display: block;
margin-top: 1em;
margin-bottom: 1em;
}
.line-block .line-block {
margin-top: 0;
margin-bottom: 0;
margin-left: 1.5em;
}
.guilabel, .menuselection {
font-family: sans-serif;
}
.accelerator {
text-decoration: underline;
}
.classifier {
font-style: oblique;
}
.classifier:before {
font-style: normal;
margin: 0 0.5em;
content: ":";
display: inline-block;
}
abbr, acronym {
border-bottom: dotted 1px;
cursor: help;
}
/* -- code displays --------------------------------------------------------- */
pre {
overflow: auto;
overflow-y: hidden; /* fixes display issues on Chrome browsers */
}
pre, div[class*="highlight-"] {
clear: both;
}
span.pre {
-moz-hyphens: none;
-ms-hyphens: none;
-webkit-hyphens: none;
hyphens: none;
white-space: nowrap;
}
div[class*="highlight-"] {
margin: 1em 0;
}
td.linenos pre {
border: 0;
background-color: transparent;
color: #aaa;
}
table.highlighttable {
display: block;
}
table.highlighttable tbody {
display: block;
}
table.highlighttable tr {
display: flex;
}
table.highlighttable td {
margin: 0;
padding: 0;
}
table.highlighttable td.linenos {
padding-right: 0.5em;
}
table.highlighttable td.code {
flex: 1;
overflow: hidden;
}
.highlight .hll {
display: block;
}
div.highlight pre,
table.highlighttable pre {
margin: 0;
}
div.code-block-caption + div {
margin-top: 0;
}
div.code-block-caption {
margin-top: 1em;
padding: 2px 5px;
font-size: small;
}
div.code-block-caption code {
background-color: transparent;
}
table.highlighttable td.linenos,
span.linenos,
div.highlight span.gp { /* gp: Generic.Prompt */
user-select: none;
-webkit-user-select: text; /* Safari fallback only */
-webkit-user-select: none; /* Chrome/Safari */
-moz-user-select: none; /* Firefox */
-ms-user-select: none; /* IE10+ */
}
div.code-block-caption span.caption-number {
padding: 0.1em 0.3em;
font-style: italic;
}
div.code-block-caption span.caption-text {
}
div.literal-block-wrapper {
margin: 1em 0;
}
code.xref, a code {
background-color: transparent;
font-weight: bold;
}
h1 code, h2 code, h3 code, h4 code, h5 code, h6 code {
background-color: transparent;
}
.viewcode-link {
float: right;
}
.viewcode-back {
float: right;
font-family: sans-serif;
}
div.viewcode-block:target {
margin: -1px -10px;
padding: 0 10px;
}
/* -- math display ---------------------------------------------------------- */
img.math {
vertical-align: middle;
}
div.body div.math p {
text-align: center;
}
span.eqno {
float: right;
}
span.eqno a.headerlink {
position: absolute;
z-index: 1;
}
div.math:hover a.headerlink {
visibility: visible;
}
/* -- printout stylesheet --------------------------------------------------- */
@media print {
div.document,
div.documentwrapper,
div.bodywrapper {
margin: 0 !important;
width: 100%;
}
div.sphinxsidebar,
div.related,
div.footer,
#top-link {
display: none;
}
} | python/docs/_static/basic.css/0 | {
"file_path": "python/docs/_static/basic.css",
"repo_id": "python",
"token_count": 6093
} | 54 |
Operations
==========
GEMM
----
.. automodule:: cutlass.op.gemm
:members:
:undoc-members:
:show-inheritance:
Grouped GEMM
------------
.. automodule:: cutlass.op.gemm_grouped
:members:
:undoc-members:
:show-inheritance:
Operation
---------
.. automodule:: cutlass.op.op
:members:
:undoc-members:
:show-inheritance:
| python/docs_src/source/cutlass.op.rst/0 | {
"file_path": "python/docs_src/source/cutlass.op.rst",
"repo_id": "python",
"token_count": 146
} | 55 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
High-level tests for running batched GEMMs
"""
from functools import partial
import logging
from math import prod
import unittest
import cutlass
from cutlass.backend.utils.device import device_cc
import torch
from utils import LayoutCombination
cutlass.set_log_level(logging.WARNING)
torch.manual_seed(2023)
def pytorch_reference(A, B, C, alpha, beta):
# Get the batch count. Assume that any of A, B, and C
# with a batch dimension have a matching batch count. Thus,
# we break out of the loop once we have found the first
# tensor containing a batch dimension.
batch_count = (1,)
for tensor in [A, B, C]:
if len(tensor.shape) > 2:
batch_count = tensor.shape[:-2]
break
int_batch_count = prod(batch_count)
def add_batch(tensor):
if len(tensor.shape) == 2:
return tensor.unsqueeze(0).repeat(int_batch_count, 1, 1)
else:
return tensor.reshape(-1, tensor.size(-2), tensor.size(-1))
# Reshape tensors to have batch dimension
A = add_batch(A)
B = add_batch(B)
C = add_batch(C)
ret = (torch.bmm(A, B) * alpha) + (C * beta)
reshape_vals = batch_count + C.shape[-2:]
return ret.reshape(*reshape_vals)
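# Illustrative shapes (not drawn from the tests below): with batch_count == (2, 3),
# A of shape (2, 3, 512, 128), B of shape (128, 256), and C of shape (512, 256),
# pytorch_reference returns a tensor of shape (2, 3, 512, 256).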
def initialize(rows, cols, batch):
tensor = torch.randint(-3, 3, size=(rows*cols*prod(batch),), device='cuda').half()
if len(batch) > 0 and prod(batch) > 1:
reshape_vals = batch + (rows, cols)
return tensor.reshape(*reshape_vals)
else:
return tensor.reshape(rows, cols)
class GemmF16Batched(unittest.TestCase):
def run_batched(self, batch_count: tuple, batch_A: bool, batch_B: bool, batch_C: bool):
M = 512
N = 256
K = 128
alpha = 1.
beta = 2.
A = initialize(M, K, batch_count if batch_A else (1,))
B = initialize(K, N, batch_count if batch_B else (1,))
C = initialize(M, N, batch_count if batch_C else (1,))
D = initialize(M, N, batch_count)
plan = cutlass.op.Gemm(A=A, B=B, C=C, D=D, element_accumulator=cutlass.DataType.f32)
plan.run(A, B, C, D, alpha, beta)
reference = pytorch_reference(A, B, C, alpha, beta)
assert reference.equal(D)
def test_batched_ABC(self):
self.run_batched((3,), True, True, True)
self.run_batched((2, 3), True, True, True)
def test_batched_AB(self):
self.run_batched((3,), True, True, False)
self.run_batched((2, 3), True, True, False)
def test_batched_AC(self):
self.run_batched((3,), True, False, True)
self.run_batched((2, 3), True, False, True)
def test_batched_BC(self):
self.run_batched((3,), False, True, True)
self.run_batched((2, 3), False, True, True)
def test_batched_A(self):
self.run_batched((3,), True, False, False)
self.run_batched((2, 3), True, False, False)
def test_batched_B(self):
self.run_batched((3,), False, True, False)
self.run_batched((2, 3), False, True, False)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/gemm/gemm_batched.py/0 | {
"file_path": "test/python/cutlass/gemm/gemm_batched.py",
"repo_id": "test",
"token_count": 1824
} | 56 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Tests the high-level GEMM interface
"""
from math import ceil
import unittest
import cutlass
import cutlass.utils.datatypes as datatypes
from cutlass.backend.utils.device import device_cc
from utils import ExpectException
class GemmEquivalence:
"""
Helper class for testing the equivalence of different constructions of the Gemm interface
"""
def __init__(self, element_A, element_B, element_C, element_D, element_accumulator,
layout_A, layout_B, layout_C, alignment_A, alignment_B, alignment_C):
self.element_A = element_A
self.element_B = element_B
self.element_C = element_C
self.element_D = element_D
self.element_accumulator = element_accumulator
self.layout_A = layout_A
self.layout_B = layout_B
self.layout_C = layout_C
self.alignment_A = alignment_A
self.alignment_B = alignment_B
self.alignment_C = alignment_C
self.plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B, element_C=element_C,
element_D=element_D, element_accumulator=element_accumulator,
layout_A=layout_A, layout_B=layout_B, layout_C=layout_C)
self.op = self.plan.construct(alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)
def _plans_equal(self, other_plan) -> bool:
"""
Compares whether two plans are equal
:param other_plan: plan to compare against the default GEMM
:type other_plan: cutlass.op.Gemm
:return: whether `other_plan` is equivalent to `self.plan`
:rtype: bool
"""
other_op = other_plan.construct(alignment_A=self.alignment_A, alignment_B=self.alignment_B, alignment_C=self.alignment_C)
# Compare whether the operations are equal by comparing the C++ code that would be emitted for them
return self.op.rt_module.emit() == other_op.rt_module.emit()
def generic_test(self):
"""
Tests the equivalence of various constructions of the Gemm interface when using CUTLASS data types
and layouts for constructing the Gemm interface
"""
if not datatypes.is_numpy_available():
return
# Test when specifying all parameters
plan_other = cutlass.op.Gemm(element_A=self.element_A, element_B=self.element_B, element_C=self.element_C,
element_D=self.element_D, element_accumulator=self.element_accumulator,
layout_A=self.layout_A, layout_B=self.layout_B, layout_C=self.layout_C)
assert self._plans_equal(plan_other)
# Test when specifying all parameters but A
plan_other = cutlass.op.Gemm(element_B=self.element_B, element_C=self.element_C,
element_D=self.element_D, element_accumulator=self.element_accumulator,
layout_B=self.layout_B, layout_C=self.layout_C,
element=self.element_A, layout=self.layout_A)
assert self._plans_equal(plan_other)
# Test when specifying all parameters but A and B as tensors and using generic element and output
# Only run this test if the layouts and types for A and B are equal.
if self.element_A == self.element_B and self.layout_A == self.layout_B:
plan_other = cutlass.op.Gemm(element_C=self.element_C, element_D=self.element_D, element_accumulator=self.element_accumulator,
layout_C=self.layout_C, element=self.element_A, layout=self.layout_A)
assert self._plans_equal(plan_other)
# Test without explicit accumulator. Only run if the types of C and the accumulator are the same.
if self.element_C == self.element_accumulator:
plan_other = cutlass.op.Gemm(element_A=self.element_A, element_B=self.element_B, element_C=self.element_C,
element_D=self.element_D, layout_A=self.layout_A, layout_B=self.layout_B,
layout_C=self.layout_C)
assert self._plans_equal(plan_other)
# Test with only the generic types and layouts. Only run if types and layouts of A, B, C, and D are the same.
if (self.element_A == self.element_B and self.element_A == self.element_C and self.element_A == self.element_D
and self.element_A == self.element_accumulator and
self.layout_A == self.layout_B and self.layout_A == self.layout_C):
plan_other = cutlass.op.Gemm(element=self.element_A, layout=self.layout_A)
assert self._plans_equal(plan_other)
def numpy_test(self):
"""
Tests the equivalence of various constructions of the Gemm interface when using numpy as a frontend
"""
if not datatypes.is_numpy_available():
return
import numpy as np
type_A = datatypes.numpy_type(self.element_A)
type_B = datatypes.numpy_type(self.element_B)
type_C = datatypes.numpy_type(self.element_C)
type_D = datatypes.numpy_type(self.element_D)
type_accum = datatypes.numpy_type(self.element_accumulator)
layout_to_order = {
cutlass.LayoutType.RowMajor: 'C',
cutlass.LayoutType.ColumnMajor: 'F'
}
size = (2, 2)
A = np.zeros(size, order=layout_to_order[self.layout_A], dtype=type_A)
B = np.zeros(size, order=layout_to_order[self.layout_B], dtype=type_B)
C = np.zeros(size, order=layout_to_order[self.layout_C], dtype=type_C)
D = np.zeros(size, order=layout_to_order[self.layout_C], dtype=type_D)
# Test when specifying all parameters via tensors
plan_np = cutlass.op.Gemm(A=A, B=B, C=C, D=D, element_accumulator=type_accum)
assert self._plans_equal(plan_np)
# Test when specifying all parameters but A as tensors
plan_np = cutlass.op.Gemm(B=B, C=C, D=D, element_accumulator=type_accum, element_A=type_A, layout_A=self.layout_A)
assert self._plans_equal(plan_np)
# Test when specifying all parameters but A and B as tensors and using generic element and output
# Only run this test if the layouts and types for A and B are equal.
if type_A == type_B and self.layout_A == self.layout_B:
plan_np = cutlass.op.Gemm(C=C, D=D, element_accumulator=type_accum, element=type_A, layout=self.layout_A)
assert self._plans_equal(plan_np)
# Test without explicit accumulator. Only run if the types of C and the accumulator are the same.
if type_C == type_accum:
plan_np = cutlass.op.Gemm(A=A, B=B, C=C, D=D)
assert self._plans_equal(plan_np)
# Test with only the generic types and layouts. Only run if types and layouts of A, B, C, and D are the same.
if (type_A == type_B and type_A == type_C and type_A == type_D and type_A == type_accum and
self.layout_A == self.layout_B and self.layout_A == self.layout_C):
plan_np = cutlass.op.Gemm(element=type_A, layout=self.layout_A)
assert self._plans_equal(plan_np)
def test_all(self):
"""
Runs all tests on the Gemm interface
"""
self.generic_test()
self.numpy_test()
class GemmEquivalenceTest(unittest.TestCase):
"""
Tests the equivalence of different constructions of the Gemm interface
"""
@unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for FP16 Tensor Core tests.")
def test_gemm_equivalence_f16_f16_f16_f16_f16_ttt_8_8_8(self):
gemm_eq = GemmEquivalence(
element_A=cutlass.DataType.f16, element_B=cutlass.DataType.f16, element_C=cutlass.DataType.f16,
element_D=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16,
layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor,
alignment_A=8, alignment_B=8, alignment_C=8)
gemm_eq.test_all()
@unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for FP16 Tensor Core tests.")
def test_gemm_equivalence_f16_f16_f16_f16_f32_ntn_8_8_8(self):
gemm_eq = GemmEquivalence(
element_A=cutlass.DataType.f16, element_B=cutlass.DataType.f16, element_C=cutlass.DataType.f16,
element_D=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32,
layout_A=cutlass.LayoutType.ColumnMajor, layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.ColumnMajor,
alignment_A=8, alignment_B=8, alignment_C=8)
gemm_eq.test_all()
@unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for FP16 Tensor Core tests.")
def test_gemm_equivalence_f16_f16_f16_f16_f16_ttt_4_4_4(self):
gemm_eq = GemmEquivalence(
element_A=cutlass.DataType.f16, element_B=cutlass.DataType.f16, element_C=cutlass.DataType.f16,
element_D=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16,
layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor,
alignment_A=4, alignment_B=4, alignment_C=4)
gemm_eq.test_all()
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for F64 Tensor Core tests.")
def test_gemm_equivalence_f64_f64_f64_f64_f64_tnt_1_1_1(self):
gemm_eq = GemmEquivalence(
element_A=cutlass.DataType.f64, element_B=cutlass.DataType.f64, element_C=cutlass.DataType.f64,
element_D=cutlass.DataType.f64, element_accumulator=cutlass.DataType.f64,
layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.ColumnMajor, layout_C=cutlass.LayoutType.RowMajor,
alignment_A=1, alignment_B=1, alignment_C=1)
gemm_eq.test_all()
class GemmErrorTests(unittest.TestCase):
"""
Tests various error scenarios that arise with the high-level Gemm interface
"""
def test_alignment(self):
"""
Tests case in which the alignment specified is unsupported
"""
plan = cutlass.op.Gemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
with ExpectException(True, 'Alignment 16 is not supported for F16. The construction should fail.'):
op = plan.construct(alignment_A=16, alignment_B=16, alignment_C=16)
def test_tensorop_availability(self):
"""
Tests case in which only SIMT operations are available but TensorOp is requested
"""
cc = device_cc()
# F64 Tensor Core operations are only available on devices with CC >= 80
supports_tensorop_f64 = cc >= 80
plan = cutlass.op.Gemm(cc=cc, element=cutlass.DataType.f64, layout=cutlass.LayoutType.RowMajor)
error_msg = f'Incorrectly raised an exception for availability of TensorOp with F64 operands on SM{cc}'
with ExpectException(not supports_tensorop_f64, error_msg):
plan.opclass = cutlass.OpcodeClass.TensorOp
expected_opclass = cutlass.OpcodeClass.TensorOp if supports_tensorop_f64 else cutlass.OpcodeClass.Simt
assert plan.opclass == expected_opclass, f'Expected opclass to be {expected_opclass}, but received {plan.opclass} for SM{cc}'
@unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for F16 Tensor Core tests.")
def test_opclass_switch(self):
"""
Tests cases in which the opcode class in question is switched (e.g., from TensorOp to SIMT)
"""
plan = cutlass.op.Gemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
assert plan.opclass == cutlass.OpcodeClass.TensorOp
# Ensure that all tile descriptions have opclass of TensorOp
for td in plan.tile_descriptions():
assert td.math_instruction.opcode_class == cutlass.OpcodeClass.TensorOp
plan.opclass = cutlass.OpcodeClass.Simt
# Ensure that all tile descriptions have opclass of Simt
for td in plan.tile_descriptions():
assert td.math_instruction.opcode_class == cutlass.OpcodeClass.Simt
def test_invalid_tile_description(self):
"""
Tests scenarios in which an invalid tile description is provided for a given CC
"""
cc = device_cc()
plan = cutlass.op.Gemm(cc=cc, element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
td = plan.tile_descriptions()[0]
stages = td.stages
# Zero stage count is valid for SM90+, as this is used to indicate that the builder's auto stage
# count should be used
with ExpectException(cc < 90, f'Requested zero stages'):
td.stages = 0
plan.construct(td)
if cc < 90:
with ExpectException(cc < 80, f'Requested more than 2 stages on SM{cc}'):
td.stages = 3
plan.construct(td)
else:
original_kschedule = td.kernel_schedule
original_eschedule = td.epilogue_schedule
with ExpectException(False, f'Incorrectly flagged an error for insufficient shared memory'):
td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedPingpong
td.epilogue_schedule = cutlass.EpilogueScheduleType.NoSmemWarpSpecialized
td.stages = 3
plan.construct(td)
# Reset schedules
td.kernel_schedule = original_kschedule
td.epilogue_schedule = original_eschedule
with ExpectException(True, f'Requested too many stages'):
td.stages = 100
plan.construct(td)
# Reset stage count
td.stages = stages
cluster_shape = td.cluster_shape
with ExpectException(cc < 90, f'Requested non-unit cluster shape on SM{cc}'):
td.cluster_shape = [2, 1, 1]
plan.construct(td)
# Reset cluster shape
td.cluster_shape = cluster_shape
with ExpectException(cc < 90, f'Requested a non-auto schedule on SM{cc}'):
td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedPingpong
td.epilogue_schedule = cutlass.EpilogueScheduleType.TmaWarpSpecialized
plan.construct(td)
with ExpectException(True, f'Requested a non-auto kernel schedule with an auto epilogue schedule'):
td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedPingpong
td.epilogue_schedule = cutlass.EpilogueScheduleType.ScheduleAuto
plan.construct(td)
with ExpectException(True, f'Requested an auto kernel schedule with a non-auto epilogue schedule'):
td.kernel_schedule = cutlass.KernelScheduleType.ScheduleAuto
td.epilogue_schedule = cutlass.EpilogueScheduleType.TmaWarpSpecialized
plan.construct(td)
with ExpectException(cc < 90, f'Requested a tile scheduler on SM{cc}'):
td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedCooperative
td.epilogue_schedule = cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative
td.tile_scheduler = cutlass.TileSchedulerType.StreamK
plan.construct(td)
# Ensure that all returned tile descriptions are unique
ops = {}
for i, td in enumerate(plan.tile_descriptions()):
op = plan.construct(td)
code_str = op.rt_module.emit()
if code_str in ops:
conflicting_td = ops[code_str]
assert False, f'Multiple tile descriptions emitted {code_str}\nTile descriptions are:\n{td}\n{conflicting_td}'
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/interface/gemm_interface.py/0 | {
"file_path": "test/python/cutlass/interface/gemm_interface.py",
"repo_id": "test",
"token_count": 7400
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Helpers to construct cached test keys and results for GEMM and convolution tests.
*/
#pragma once
#include <typeinfo>
#include <fstream>
#include <list>
#include <utility>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/core_io.h"
#include "cutlass/util/tensor_view_io.h"
#include "thrust/universal_vector.h"
#ifndef CUTLASS_TEST_ENABLE_CACHED_RESULTS
#define CUTLASS_TEST_ENABLE_CACHED_RESULTS false
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test::conv::device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result of a test
struct CachedTestKey {
std::string op; ///< Concatenated string representation of operation performed
std::string problem; ///< Concatenated string representation of problem description
std::string types; ///< Concatenated string representation of operand types
uint32_t A; ///< Hashed result of tensor A
uint32_t B; ///< Hashed result of tensor B
uint32_t C; ///< Hashed result of tensor C
//
// Methods
//
inline CachedTestKey(): A(), B(), C() { }
inline CachedTestKey(
std::string op, ///< Concatenated string representation of operation performed
std::string problem, ///< Concatenated string representation of problem description
std::string types, ///< Concatenated string representation of operand types
uint32_t A, ///< Hashed result of tensor A
uint32_t B, ///< Hashed result of tensor B
uint32_t C ///< Hashed result of tensor C
):
op(op), problem(problem), types(types), A(A), B(B), C(C)
{ }
/// Checks for equality of the problem
bool operator==(CachedTestKey const &rhs) const {
return op == rhs.op && problem == rhs.problem && types == rhs.types && A == rhs.A && B == rhs.B && C == rhs.C;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
inline std::istream &operator>>(std::istream &in, CachedTestKey &result) {
in >> result.op;
in >> result.problem;
in >> result.types;
in >> result.A;
in >> result.B;
in >> result.C;
return in;
}
inline std::ostream &operator<<(std::ostream &out, CachedTestKey const &result) {
out << result.op << " ";
out << result.problem << " ";
out << result.types << " ";
out << result.A << " ";
out << result.B << " ";
out << result.C << " ";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
struct CachedTestResult {
uint32_t D;
//
// Methods
//
CachedTestResult(): D()
{ }
CachedTestResult(uint32_t D): D(D)
{ }
operator bool() const {
return bool(D);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
inline std::istream &operator>>(std::istream &in, CachedTestResult &result) {
in >> result.D;
return in;
}
inline std::ostream &operator<<(std::ostream &out, CachedTestResult const &result) {
out << result.D;
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
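// Illustrative on-disk format (all values are made up): each line written by
// CachedTestResultListing::write() below holds the space-separated key fields
// followed by the hashed result D, e.g.
//
//   conv2d fprop_<problem>_alpha1_beta0 <types> 305419896 2596069104 19088743 4275878552
//
// where <problem> and <types> stand for the encoded problem and type strings.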
struct CachedTestResultListing {
std::list<std::pair<CachedTestKey, CachedTestResult>> results;
//
// Methods
//
inline CachedTestResultListing(std::string const &path) {
std::ifstream file(path);
while (file.good()) {
CachedTestKey key;
file >> key;
CachedTestResult result;
file >> result;
if (result) {
results.push_back(std::make_pair(key, result));
}
}
}
/// Returns the cached result
std::pair<bool, CachedTestResult> find(CachedTestKey const &rhs) const {
for (auto const & result : results) {
if (result.first == rhs) {
return std::make_pair(true, result.second);
}
}
return std::make_pair(false, CachedTestResult());
}
/// Appends an entry
void append(CachedTestKey const &key, CachedTestResult const &result) {
if (result) {
results.push_back(std::make_pair(key, result));
}
}
/// Writes the entire listing to a file
bool write(std::string const &path) {
std::ofstream file(path);
if (!file.good()) {
return false;
}
for (auto const &result : results) {
file << result.first << result.second << std::endl;
}
return true;
}
};
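// Sketch of the intended usage (illustrative; key construction and the reference
// computation are elided, and the file name is hypothetical):
//
//   CachedTestResultListing listing("cached_results_conv2d.txt");
//   auto found = listing.find(key);
//   if (!found.first) {
//     CachedTestResult computed(TensorHash(reference_D.host_view()));
//     listing.append(key, computed);
//     listing.write("cached_results_conv2d.txt");
//   }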
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element>
struct ScalarEncoder {
Element scalar;
ScalarEncoder(Element s): scalar(s) { }
std::string str() const {
std::stringstream ss;
Element s = scalar;
if (s < Element()) {
s = -s;
ss << "n";
}
ss << s;
return ss.str();
}
};
template <typename Element>
ScalarEncoder<Element> EncodeScalar(Element a) {
return ScalarEncoder<Element>(a);
}
template <typename Element>
struct ScalarEncoder<cutlass::complex<Element>> {
cutlass::complex<Element> scalar;
ScalarEncoder(cutlass::complex<Element> s): scalar(s) { }
std::string str() const {
std::stringstream ss;
ss << EncodeScalar<Element>(scalar.real()) << "_" << EncodeScalar<Element>(scalar.imag()) << "i";
return ss.str();
}
};
template <typename Element>
std::ostream &operator<<(std::ostream &out, ScalarEncoder<Element> const &scalar) {
out << scalar.str();
return out;
}
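// Illustrative encodings produced above: EncodeScalar(1.5f).str() == "1.5",
// EncodeScalar(-1.5f).str() == "n1.5" (negatives are prefixed with 'n'), and
// EncodeScalar(cutlass::complex<float>(1, -2)).str() == "1_n2i".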
/////////////////////////////////////////////////////////////////////////////////////////////////
inline char const *EncodeOperator(cutlass::conv::Operator conv_op) {
switch (conv_op) {
case cutlass::conv::Operator::kFprop: return "fprop";
case cutlass::conv::Operator::kDgrad: return "dgrad";
case cutlass::conv::Operator::kWgrad: return "wgrad";
case cutlass::conv::Operator::kDeconv: return "deconv";
}
return "conv_unknown";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Encode GemmCoord (Gemm problem size)
inline std::ostream &EncodeProblemSize(
std::ostream &out,
cutlass::gemm::GemmCoord const &problem) {
out << problem.m() << "x" << problem.n() << "x" << problem.k() << "_";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Encode Conv2dProblemSize
inline std::ostream &EncodeProblemSize(
std::ostream &out,
cutlass::conv::Conv2dProblemSize const &problem) {
out << problem.N << "x" << problem.H << "x" << problem.W << "x" << problem.C << "_"
<< problem.P << "x" << problem.Q << "_" << problem.K << "x" << problem.R << "x" << problem.S << "_";
out << "pad_h" << problem.pad_h << "w" << problem.pad_w << "_";
out << "stride_h" << problem.stride_h << "w" << problem.stride_w << "_";
out << "dil_h" << problem.dilation_h << "w" << problem.dilation_w << "_";
switch (problem.mode) {
case cutlass::conv::Mode::kCrossCorrelation:
out << "corr";
break;
case cutlass::conv::Mode::kConvolution:
out << "conv";
break;
}
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Encode Conv3dProblemSize
inline std::ostream &EncodeProblemSize(
std::ostream &out,
cutlass::conv::Conv3dProblemSize const &problem) {
out << problem.N << "x" << problem.D << "x" << problem.H << "x" << problem.W << "x" << problem.C << "_"
<< problem.Z << "x" << problem.P << "x" << problem.Q << "_" << problem.K << "x" << problem.T << "x" << problem.R << "x" << problem.S << "_";
out << "pad_d" << problem.pad_d << "h" << problem.pad_h << "w" << problem.pad_w << "_";
out << "stride_d" << problem.stride_d << "h" << problem.stride_h << "w" << problem.stride_w << "_";
out << "dil_d" << problem.dilation_d << "h" << problem.dilation_h << "w" << problem.dilation_w << "_";
switch (problem.mode) {
case cutlass::conv::Mode::kCrossCorrelation:
out << "corr";
break;
case cutlass::conv::Mode::kConvolution:
out << "conv";
break;
}
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Encode 3.x ConvNd ProblemShape
template <class ProblemShape>
inline std::ostream &EncodeProblemSize(
std::ostream &out,
ProblemShape const& problem_shape) {
out << problem_shape.shape_A << "_";
out << problem_shape.shape_B << "_";
out << "padl" << problem_shape.lower_padding << "_";
out << "padu" << problem_shape.upper_padding << "_";
out << "str" << problem_shape.traversal_stride << "_";
out << "dil" << problem_shape.dilation << "_";
switch (problem_shape.mode) {
case cutlass::conv::Mode::kCrossCorrelation:
out << "corr";
break;
case cutlass::conv::Mode::kConvolution:
out << "conv";
break;
}
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element>
inline std::string ElementTypeName() {
return std::string(typeid(Element).name());
}
template <>
inline std::string ElementTypeName<cutlass::half_t>() {
return "h";
}
template <>
inline std::string ElementTypeName<cutlass::complex<cutlass::half_t>>() {
return "ch";
}
template <>
inline std::string ElementTypeName<cutlass::bfloat16_t>() {
return "bf16";
}
template <>
inline std::string ElementTypeName<cutlass::complex<cutlass::bfloat16_t>>() {
return "cbf16";
}
template <>
inline std::string ElementTypeName<cutlass::tfloat32_t>() {
return "tf32";
}
template <>
inline std::string ElementTypeName<cutlass::complex<cutlass::tfloat32_t>>() {
return "ctf32";
}
template <>
inline std::string ElementTypeName<cutlass::complex<float>>() {
return "c";
}
template <>
inline std::string ElementTypeName<cutlass::complex<double>>() {
return "z";
}
template <>
inline std::string ElementTypeName<cutlass::Quaternion<float>>() {
return "q";
}
template <>
inline std::string ElementTypeName<int8_t>() {
return "s8";
}
template <>
inline std::string ElementTypeName<uint8_t>() {
return "u8";
}
template <>
inline std::string ElementTypeName<cutlass::int4b_t>() {
return "s4";
}
template <>
inline std::string ElementTypeName<cutlass::uint4b_t>() {
return "u4";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Layout>
inline std::string LayoutTypeName() {
return std::string(typeid(Layout).name());
}
template <>
inline std::string LayoutTypeName<cutlass::layout::ColumnMajor>() {
return "n";
}
template <>
inline std::string LayoutTypeName<cutlass::layout::RowMajor>() {
return "t";
}
template <>
inline std::string LayoutTypeName<cutlass::layout::TensorNHWC>() {
return "nhwc";
}
template <>
inline std::string LayoutTypeName<cutlass::layout::TensorNCxHWx<32>>() {
return "nc32hw32";
}
template <>
inline std::string LayoutTypeName<cutlass::layout::TensorNCxHWx<64>>() {
return "nc64hw64";
}
template <>
inline std::string LayoutTypeName<cutlass::layout::TensorCxRSKx<32>>() {
return "c32rsk32";
}
template <>
inline std::string LayoutTypeName<cutlass::layout::TensorCxRSKx<64>>() {
return "c64rsk64";
}
template <>
inline std::string LayoutTypeName<cutlass::layout::TensorNDHWC>() {
return "ndhwc";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Layout>
inline std::string TensorTypeName() {
std::stringstream ss;
ss << ElementTypeName<Element>() << LayoutTypeName<Layout>();
return ss.str();
}
template <typename Element>
inline std::string TensorTypeName() {
std::stringstream ss;
ss << ElementTypeName<Element>();
return ss.str();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Hash function on a byte array
struct CRC32 {
uint32_t table[256];
//
// Methods
//
CRC32() {
uint32_t rem;
int i, j;
for (i = 0; i < 256; i++) {
rem = i;
for (j = 0; j < 8; j++) {
if (rem & 1) {
rem >>= 1;
rem ^= 0xedb88320;
} else
rem >>= 1;
}
table[i] = rem;
}
}
/// Computes the CRC of an array of bytes
uint32_t operator()(void const *start, size_t length, uint32_t crc = uint32_t()) const {
uint8_t const *p = static_cast<uint8_t const *>(start);
uint8_t const *q = static_cast<uint8_t const *>(start) + length;
crc = ~crc;
for (; p != q; ++p) {
uint8_t octet = *p;
crc = (crc >> 8) ^ table[(crc & 0xff) ^ octet];
}
return ~crc;
}
};
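// Illustrative use of CRC32 (buffer names are hypothetical): hash a byte buffer,
// optionally chaining a previously computed value:
//
//   CRC32 crc;
//   uint32_t h = crc(bytes.data(), bytes.size());
//   h = crc(more_bytes.data(), more_bytes.size(), h);   // continue the running CRC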
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Element, typename Layout
>
uint32_t TensorHash(
cutlass::TensorView<Element, Layout> view,
CRC32 const &hash = CRC32(),
uint32_t crc = uint32_t()
) {
return hash(view.data(), view.capacity() * cutlass::sizeof_bits<Element>::value / 8, crc);
}
template <typename Element>
uint32_t TensorHash(
thrust::universal_vector<Element>& tensor,
CRC32 const &hash = CRC32(),
uint32_t crc = uint32_t()
) {
return hash(tensor.data().get(), tensor.size() * cutlass::sizeof_bits<Element>::value / 8, crc);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA, typename LayoutA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ElementAccumulator,
typename ElementCompute
>
inline std::ostream &EncodeTypes(
std::ostream &out
) {
out << TensorTypeName<ElementA, LayoutA>() << "_"
<< TensorTypeName<ElementB, LayoutB>() << "_"
<< TensorTypeName<ElementC, LayoutC>() << "_"
<< ElementTypeName<ElementAccumulator>() << "_"
<< ElementTypeName<ElementCompute>();
return out;
}
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename ElementD
>
inline std::ostream &EncodeTypes(
std::ostream &out
) {
out << TensorTypeName<ElementA>() << "_"
<< TensorTypeName<ElementB>() << "_"
<< TensorTypeName<ElementC>() << "_"
<< ElementTypeName<ElementD>();
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA, typename LayoutA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ElementAccumulator,
typename ElementCompute
>
inline CachedTestKey CreateCachedGemmTestKey(
cutlass::gemm::GemmCoord const &problem,
ElementCompute alpha,
ElementCompute beta,
cutlass::TensorView<ElementA, LayoutA> A,
cutlass::TensorView<ElementB, LayoutB> B,
cutlass::TensorView<ElementC, LayoutC> C
) {
CachedTestKey key;
// Encode gemm operator and problem sizes
key.op = "gemm";
std::stringstream ss_problem;
EncodeProblemSize(ss_problem, problem);
ss_problem << "_alpha" << EncodeScalar(alpha) << "_beta" << EncodeScalar(beta);
key.problem = ss_problem.str();
// Encode problem data types
std::stringstream ss_types;
EncodeTypes<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
ElementCompute>(ss_types);
key.types = ss_types.str();
// Encode hash for problem data
CRC32 crc_hash;
key.A = TensorHash(A, crc_hash);
key.B = TensorHash(B, crc_hash);
key.C = TensorHash(C, crc_hash);
return key;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA, typename LayoutA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ElementAccumulator,
typename ElementCompute
>
inline CachedTestKey CreateCachedConv2dTestKey(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem,
ElementCompute alpha,
ElementCompute beta,
cutlass::TensorView<ElementA, LayoutA> A,
cutlass::TensorView<ElementB, LayoutB> B,
cutlass::TensorView<ElementC, LayoutC> C
) {
CachedTestKey key;
// Encode conv2d operator and problem sizes
key.op = "conv2d";
std::stringstream ss_problem;
ss_problem << EncodeOperator(conv_operator) << "_";
EncodeProblemSize(ss_problem, problem);
ss_problem << "_alpha" << EncodeScalar(alpha) << "_beta" << EncodeScalar(beta);
key.problem = ss_problem.str();
// Encode problem data types
std::stringstream ss_types;
EncodeTypes<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
ElementCompute>(ss_types);
key.types = ss_types.str();
// Encode hash for problem data
CRC32 crc_hash;
key.A = TensorHash(A, crc_hash);
key.B = TensorHash(B, crc_hash);
key.C = TensorHash(C, crc_hash);
return key;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA, typename LayoutA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ElementAccumulator,
typename ElementCompute
>
inline CachedTestKey CreateCachedConv2dWithBroadcastTestKey(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem,
ElementCompute alpha,
ElementCompute beta,
cutlass::TensorView<ElementA, LayoutA> A,
cutlass::TensorView<ElementB, LayoutB> B,
cutlass::TensorView<ElementC, LayoutC> C
) {
CachedTestKey key;
// Encode conv2d operator and problem sizes
key.op = "conv2d_with_broadcast";
std::stringstream ss_problem;
ss_problem << EncodeOperator(conv_operator) << "_";
EncodeProblemSize(ss_problem, problem);
ss_problem << "_alpha" << EncodeScalar(alpha) << "_beta" << EncodeScalar(beta);
key.problem = ss_problem.str();
// Encode problem data types
std::stringstream ss_types;
EncodeTypes<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
ElementCompute>(ss_types);
key.types = ss_types.str();
// Encode hash for problem data
CRC32 crc_hash;
key.A = TensorHash(A, crc_hash);
key.B = TensorHash(B, crc_hash);
key.C = TensorHash(C, crc_hash);
return key;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA, typename LayoutA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ElementAccumulator,
typename ElementCompute
>
inline CachedTestKey CreateCachedConv2dWithReductionTestKey(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem,
ElementCompute alpha,
ElementCompute beta,
cutlass::TensorView<ElementA, LayoutA> A,
cutlass::TensorView<ElementB, LayoutB> B,
cutlass::TensorView<ElementC, LayoutC> C
) {
CachedTestKey key;
// Encode conv2d operator and problem sizes
key.op = "conv2d_with_reduction";
std::stringstream ss_problem;
ss_problem << EncodeOperator(conv_operator) << "_";
EncodeProblemSize(ss_problem, problem);
ss_problem << "_alpha" << EncodeScalar(alpha) << "_beta" << EncodeScalar(beta);
key.problem = ss_problem.str();
// Encode problem data types
std::stringstream ss_types;
EncodeTypes<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
ElementCompute>(ss_types);
key.types = ss_types.str();
// Encode hash for problem data
CRC32 crc_hash;
key.A = TensorHash(A, crc_hash);
key.B = TensorHash(B, crc_hash);
key.C = TensorHash(C, crc_hash);
return key;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA, typename LayoutA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ElementAccumulator,
typename ElementCompute
>
inline CachedTestKey CreateCachedConv3dTestKey(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv3dProblemSize const &problem,
ElementCompute alpha,
ElementCompute beta,
cutlass::TensorView<ElementA, LayoutA> A,
cutlass::TensorView<ElementB, LayoutB> B,
cutlass::TensorView<ElementC, LayoutC> C
) {
CachedTestKey key;
// Encode conv3d operator and problem sizes
key.op = "conv3d";
std::stringstream ss_problem;
ss_problem << EncodeOperator(conv_operator) << "_";
EncodeProblemSize(ss_problem, problem);
ss_problem << "_alpha" << EncodeScalar(alpha) << "_beta" << EncodeScalar(beta);
key.problem = ss_problem.str();
// Encode problem data types
std::stringstream ss_types;
EncodeTypes<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
ElementCompute>(ss_types);
key.types = ss_types.str();
// Encode problem data
CRC32 crc_hash;
key.A = TensorHash(A, crc_hash);
key.B = TensorHash(B, crc_hash);
key.C = TensorHash(C, crc_hash);
return key;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
class ProblemShape,
typename ElementA,
typename ElementB,
typename ElementC,
typename ElementD
>
inline CachedTestKey CreateCachedConvNd3xTestKey(
cutlass::conv::Operator conv_operator,
ProblemShape const& problem_shape,
double alpha,
double beta,
thrust::universal_vector<ElementA> A,
thrust::universal_vector<ElementB> B,
thrust::universal_vector<ElementC> C
) {
CachedTestKey key;
// Encode convNd operator and problem sizes
std::stringstream ss_op;
ss_op << "conv" << ProblemShape::RankS << "d";
key.op = ss_op.str();
std::stringstream ss_problem;
ss_problem << EncodeOperator(conv_operator) << "_";
EncodeProblemSize(ss_problem, problem_shape);
ss_problem << "_alpha" << EncodeScalar(alpha) << "_beta" << EncodeScalar(beta);
key.problem = ss_problem.str();
// Encode problem data types
std::stringstream ss_types;
EncodeTypes<
ElementA,
ElementB,
ElementC,
ElementD>(ss_types);
key.types = ss_types.str();
// Encode problem data
CRC32 crc_hash;
key.A = TensorHash(A, crc_hash);
key.B = TensorHash(B, crc_hash);
key.C = TensorHash(C, crc_hash);
return key;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace test::conv::device
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/conv/cache_testbed_output.h/0 | {
"file_path": "test/unit/conv/cache_testbed_output.h",
"repo_id": "test",
"token_count": 8379
} | 58 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for conversion operators.
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace core {
namespace kernel {
/// Simple conversion function
template <typename Destination, typename Source, int Count>
__global__ void convert(
cutlass::Array<Destination, Count> *destination,
cutlass::Array<Source, Count> const *source) {
cutlass::FastNumericArrayConverter<Destination, Source, Count> convert;
*destination = convert(*source);
}
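// The tests below launch this kernel with a single thread (grid(1,1), block(1,1)); one call to
// the converter handles the entire Array<Source, Count> fragment.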
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Destination, typename Source, int Count>
void run_test_integer_range_limited() {
const int kN = Count;
dim3 grid(1, 1);
dim3 block(1, 1);
cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN});
cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN});
for (int i = 0; i < kN; ++i) {
source.host_data()[i] = Source(i % 4);
}
source.sync_device();
convert<Destination, Source, kN><<< grid, block >>>(
reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()),
reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data())
);
destination.sync_host();
for (int i = 0; i < kN; ++i) {
EXPECT_TRUE(float(destination.host_data()[i]) == float(source.host_data()[i]));
}
}
template <typename Destination, typename Source, int Count>
void run_test_integer_range_all() {
const int kN = Count;
dim3 grid(1, 1);
dim3 block(1, 1);
cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN});
cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN});
int const kIntSourceMin = std::numeric_limits<Source>::min();
int const kIntSourceMax = std::numeric_limits<Source>::max();
int const kIntRange = kIntSourceMax - kIntSourceMin + 1;
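  // With Count == 256 and an 8-bit integer Source (as in the tests below), i % kIntRange visits
  // every representable source value exactly once.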
for (int i = 0; i < kN; ++i) {
source.host_data()[i] = Source(kIntSourceMin + (i % kIntRange));
}
source.sync_device();
convert<Destination, Source, kN><<< grid, block >>>(
reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()),
reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data())
);
destination.sync_host();
// Verify conversion
bool passed = true;
for (int i = 0; i < kN; ++i) {
if(!(float(destination.host_data()[i]) == float(source.host_data()[i]))) {
passed = false;
break;
}
}
EXPECT_TRUE(passed) << " FastNumericArrayConverter failed";
// Print out results for the failed conversion.
if (!passed) {
for (int i = 0; i < kN; ++i) {
std::cout << "source(" << float(source.host_data()[i]) << ") -> "
<< "destination ("<< float(destination.host_data()[i]) << ")" << std::endl;
}
}
std::flush(std::cout);
}
} // namespace kernel
} // namespace core
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(FastNumericConversion, s32_to_f32) {
int const kN = 4;
using Source = int;
using Destination = float;
test::core::kernel::run_test_integer_range_limited<Destination, Source, kN>();
}
TEST(FastNumericConversion, s8_to_f32_array) {
int const kN = 256;
using Source = int8_t;
using Destination = float;
test::core::kernel::run_test_integer_range_all<Destination, Source, kN>();
}
TEST(FastNumericConversion, u8_to_f32_array) {
int const kN = 256;
using Source = uint8_t;
using Destination = float;
test::core::kernel::run_test_integer_range_all<Destination, Source, kN>();
}
TEST(FastNumericConversion, s8_to_f16_array) {
int const kN = 256;
using Source = int8_t;
using Destination = cutlass::half_t;
test::core::kernel::run_test_integer_range_all<Destination, Source, kN>();
}
TEST(FastNumericConversion, u8_to_f16_array) {
int const kN = 256;
using Source = uint8_t;
using Destination = cutlass::half_t;
test::core::kernel::run_test_integer_range_all<Destination, Source, kN>();
}
TEST(FastNumericConversion, u8_to_bf16_array) {
int const kN = 256;
using Source = uint8_t;
using Destination = cutlass::bfloat16_t;
test::core::kernel::run_test_integer_range_all<Destination, Source, kN>();
}
TEST(FastNumericConversion, s8_to_bf16_array) {
int const kN = 256;
using Source = int8_t;
using Destination = cutlass::bfloat16_t;
test::core::kernel::run_test_integer_range_all<Destination, Source, kN>();
}
| test/unit/core/fast_numeric_conversion.cu/0 | {
"file_path": "test/unit/core/fast_numeric_conversion.cu",
"repo_id": "test",
"token_count": 2117
} | 59 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cute/layout.hpp>
TEST(CuTe_core, WeaklyCongruent)
{
using namespace cute;
auto a = _1{};
auto b = _2{};
EXPECT_TRUE (weakly_congruent(a, a));
EXPECT_TRUE (weakly_congruent(b, b));
EXPECT_TRUE (weakly_congruent(a, b));
auto a0 = Shape<_1>{};
auto b0 = Shape<_2>{};
EXPECT_TRUE (weakly_congruent(a , a0));
EXPECT_TRUE (weakly_congruent(b , b0));
EXPECT_TRUE (weakly_congruent(a , b0));
EXPECT_TRUE (weakly_congruent(b , a0));
EXPECT_FALSE(weakly_congruent(a0, a ));
EXPECT_FALSE(weakly_congruent(b0, b ));
EXPECT_FALSE(weakly_congruent(a0, b ));
EXPECT_FALSE(weakly_congruent(b0, a ));
EXPECT_TRUE (weakly_congruent(a0, a0));
EXPECT_TRUE (weakly_congruent(b0, b0));
EXPECT_TRUE (weakly_congruent(a0, b0));
auto a1 = Shape<_1, _1>{};
EXPECT_TRUE (weakly_congruent(a , a1));
EXPECT_FALSE(weakly_congruent(a0, a1));
EXPECT_TRUE (weakly_congruent(a1, a1));
auto a2 = Shape<_1, Shape<_1,_1>>{};
EXPECT_TRUE (weakly_congruent(a , a2));
EXPECT_FALSE(weakly_congruent(a0, a2));
EXPECT_TRUE (weakly_congruent(a1, a2));
auto b1 = Shape<_2, _2>{};
EXPECT_TRUE (weakly_congruent(b , b1));
EXPECT_FALSE(weakly_congruent(b0, b1));
EXPECT_TRUE (weakly_congruent(a1, b1));
auto b2 = Shape<_2, Shape<_2,_2>>{};
EXPECT_FALSE(weakly_congruent(a2, b0));
EXPECT_FALSE(weakly_congruent(a2, a1));
EXPECT_TRUE (weakly_congruent(a2, b2));
auto b3 = Shape<Shape<_2,_2>, Shape<_2,_2>>{};
EXPECT_FALSE(weakly_congruent(a0, b3));
EXPECT_TRUE (weakly_congruent(a1, b3));
EXPECT_TRUE (weakly_congruent(a2, b3));
}
TEST(CuTe_core, WeaklyCompatible)
{
using namespace cute;
auto a = _16{};
auto b = _12{};
auto c = _8{};
EXPECT_TRUE (weakly_compatible(a, a));
EXPECT_TRUE (weakly_compatible(b, b));
EXPECT_TRUE (weakly_compatible(c, c));
EXPECT_FALSE(weakly_compatible(a, b));
EXPECT_FALSE(weakly_compatible(a, c));
EXPECT_TRUE (weakly_compatible(c, a));
auto a0 = Shape<_16>{};
EXPECT_TRUE (weakly_compatible(a0, a0));
EXPECT_TRUE (weakly_compatible(a , a0));
EXPECT_FALSE(weakly_compatible(a0, a ));
EXPECT_TRUE (weakly_compatible(c , a0));
EXPECT_FALSE(weakly_compatible(a0, c ));
EXPECT_FALSE(weakly_compatible(b , a0));
EXPECT_FALSE(weakly_compatible(a0, b ));
auto a1 = Shape<_2,_8>{};
EXPECT_TRUE (weakly_compatible(a1, a1));
EXPECT_TRUE (weakly_compatible(a , a1));
EXPECT_FALSE(weakly_compatible(a0, a1));
EXPECT_FALSE(weakly_compatible(a1, a0));
EXPECT_TRUE (weakly_compatible(a1, Shape<_2,Shape<_2,_4>>{}));
auto a2 = Shape<Shape<_2,_8>>{};
EXPECT_TRUE (weakly_compatible(a2, a2));
EXPECT_TRUE (weakly_compatible(a , a2));
EXPECT_TRUE (weakly_compatible(c , a2));
EXPECT_TRUE (weakly_compatible(a0, a2));
EXPECT_FALSE(weakly_compatible(a2, a0));
auto a3 = Shape<Shape<_2,Shape<_4,_2>>>{};
EXPECT_TRUE (weakly_compatible(a3, a3));
EXPECT_TRUE (weakly_compatible(a , a3));
EXPECT_TRUE (weakly_compatible(c , a3));
EXPECT_TRUE (weakly_compatible(a0, a3));
EXPECT_FALSE(weakly_compatible(a3, a0));
EXPECT_TRUE (weakly_compatible(a2, a3));
EXPECT_FALSE(weakly_compatible(a3, a2));
}
| test/unit/cute/core/int_tuple.cpp/0 | {
"file_path": "test/unit/cute/core/int_tuple.cpp",
"repo_id": "test",
"token_count": 1935
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include "../hopper/tma_load_testbed.hpp"
using namespace cute;
using namespace cutlass::test;
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
template <class T, class TmaType = T, class GMEM_Layout, class SMEM_Layout, class CTA_Tile>
auto
test_tma_load(GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout,
CTA_Tile const& cta_tile)
{
return test_tma_load<T, TmaType>(SM90_TMA_LOAD{}, gmem_layout, smem_layout, cta_tile);
}
template <class T, class TmaType = T, class GMEM_Layout, class SMEM_Layout>
auto
test_tma_load(GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout)
{
return test_tma_load<T, TmaType>(gmem_layout, smem_layout, product_each(shape(smem_layout)));
}
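// For reference, the shorthand overload above means that
//   test_tma_load<half_t>(gmem_layout, smem_layout);
// is equivalent to
//   test_tma_load<half_t>(SM90_TMA_LOAD{}, gmem_layout, smem_layout, product_each(shape(smem_layout)));
// i.e. SM90_TMA_LOAD is issued with a CTA tile equal to the per-mode extents of the SMEM layout.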
TEST(SM90_CuTe_Hopper, Tma_Load_1D)
{
{
Layout smem_layout = Layout<_256, _1>{};
{
Layout gmem_layout = smem_layout;
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(128, GenColMajor{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(384, GenColMajor{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
}
{
Layout smem_layout = Layout<Shape<_8,_8>, Stride<_1,_8>>{};
{
Layout gmem_layout = smem_layout;
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
// This doesn't result in a 1D TMA, even though it could/should...
{
Layout gmem_layout = tile_to_shape(smem_layout, Shape<_16,_16>{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
}
}
TEST(SM90_CuTe_Hopper, Tma_Load_32x32_Col)
{
Layout smem_layout = Layout<Shape<_32,_32>, Stride<_1,_32>>{};
{
Layout gmem_layout = smem_layout;
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), GenColMajor{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), make_stride(Int<1>{}, 1024));
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
}
TEST(SM90_CuTe_Hopper, Tma_Load_32x32_Row)
{
Layout smem_layout = Layout<Shape<_32,_32>, Stride<_32,_1>>{};
{
Layout gmem_layout = smem_layout;
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), GenRowMajor{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), make_stride(1024, Int<1>{}));
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
}
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_load_swizzle_atom_mn()
{
auto smem_layout = SWIZZLE_ATOM<T>{};
{ // Static gmem
//Layout gmem_layout = make_layout(shape(smem_layout), GenColMajor{});
//test_tma_load<T>(gmem_layout, smem_layout);
}
{ // Dynamic gmem
Layout gmem_layout = make_layout(make_shape(2*uint32_t(size<0>(smem_layout)), 2*uint32_t(size<1>(smem_layout))),
GenColMajor{});
test_tma_load<T>(gmem_layout, smem_layout);
}
}
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_load_swizzle_atom_k()
{
auto smem_layout = SWIZZLE_ATOM<T>{};
{ // Static gmem
//Layout gmem_layout = make_layout(shape(smem_layout), GenRowMajor{});
//test_tma_load<T>(gmem_layout, smem_layout);
}
{ // Dynamic gmem
Layout gmem_layout = make_layout(make_shape(2*uint32_t(size<0>(smem_layout)), 2*uint32_t(size<1>(smem_layout))),
GenRowMajor{});
test_tma_load<T>(gmem_layout, smem_layout);
}
}
TEST(SM90_CuTe_Hopper, Tma_Load_Swizzle_Atoms)
{
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_INTER_Atom>();
}
template <class T, template <typename> typename SWIZZLE_ATOM>
auto
test_tma_load_swizzle_tile_mn()
{
auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{});
Layout gmem_layout = make_layout(make_shape(int(size<0>(smem_layout)), int(size<1>(smem_layout))), GenColMajor{});
return test_tma_load<T>(gmem_layout, smem_layout);
}
template <class T, template <typename> typename SWIZZLE_ATOM>
auto
test_tma_load_swizzle_tile_k()
{
auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{});
Layout gmem_layout = make_layout(make_shape(int(size<0>(smem_layout)), int(size<1>(smem_layout))), GenRowMajor{});
return test_tma_load<T>(gmem_layout, smem_layout);
}
TEST(SM90_CuTe_Hopper, Tma_Load_Swizzle_Tiles)
{
// Other T-types use too much smem
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_INTER_Atom>();
}
// Tensor by-mode
TEST(SM90_CuTe_Hopper, Tma_Load_Tensor)
{
// 3-mode TMA
{
Layout gmem_layout = make_layout(make_shape(128, 64, 5));
auto cta_tile = Shape<_64, _32>{}; // GMEM Tiling:
// Take 64-elem from m
// Take 32-elem from k
auto smem_layout = make_layout(Shape<_64,_32>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
// 4-mode TMA
{
Layout gmem_layout = make_layout(make_shape(make_shape(80,40),make_shape(32,12)));
auto cta_tile = Shape<Shape<_16,_8>,Shape<_32,_2>>{}; // GMEM Tiling:
// Take 16-elem from m0, 8-elem from m1,
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_128,_64>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
// 5-mode TMA
{
Layout gmem_layout = make_layout(make_shape(make_shape(32,32,32),make_shape(32,12)));
    auto cta_tile = Shape<Shape<_16,_4,_2>,Shape<_16,_2>>{};    // GMEM Tiling:
                                                                //   Take 16-elem from m0, 4-elem from m1, 2-elem from m2
                                                                //   Take 16-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_128,_32>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
}
// Tensor Multimode -- TMA with more than 5 modes in GMEM (packs residual modes into last TMA mode)
TEST(SM90_CuTe_Hopper, Tma_Load_Tensor_Multimode)
{
{
Layout gmem_layout = make_layout(make_shape(make_shape(32,3,2,2),make_shape(32,4,2)));
auto cta_tile = Shape<Shape<_32>, Shape<_32,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_32,_64>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
{
Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,2),make_shape(32,4,2)));
auto cta_tile = Shape<Shape<_32,_3>, Shape<_32,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0, 3-elem from m1
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_96,_64>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
{
Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,3,2),make_shape(32,4,2,2)));
auto cta_tile = Shape<Shape<_32>, Shape<_16,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0
// Take 16-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_32,_32>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
}
TEST(SM90_CuTe_Hopper, Tma_Load_Coalesce)
{
// Interleaved ColMajor
{
Layout gmem_layout = make_layout(make_shape ( 128, make_shape (_4{}, 128)),
make_stride( _4{}, make_stride(_1{}, 512)));
auto smem_layout = make_layout(make_shape (_32{}, make_shape (_4{}, _32{})),
make_stride( _4{}, make_stride(_1{}, _128{})));
// By default, uses cta_tile = Shape<_32,_128>
auto tma = test_tma_load<int8_t>(gmem_layout, smem_layout);
// Check the TMA rank
EXPECT_EQ(rank(tma.get_tma_tensor(shape(gmem_layout))(0)), 2);
}
// Interleaved RowMajor
{
Layout gmem_layout = make_layout(make_shape (make_shape (_4{}, 128), 128),
make_stride(make_stride(_1{}, 512), _4{}));
auto smem_layout = make_layout(make_shape (make_shape (_4{}, _32{}), _32{}),
make_stride(make_stride(_1{}, _128{}), _4{}));
// By default, uses cta_tile = Shape<_128,_32>
auto tma = test_tma_load<int8_t>(gmem_layout, smem_layout);
// Check the TMA rank
EXPECT_EQ(rank(tma.get_tma_tensor(shape(gmem_layout))(0)), 2);
}
// Account for stride-0 modes within the TMA tile
{
Layout gmem_layout = make_layout(make_shape ( 128, make_shape (_32{}, 4)),
make_stride( _1{}, make_stride( _0{}, 128)));
auto smem_layout = make_layout(make_shape (_64{}, make_shape (_32{} )),
make_stride( _1{}, make_stride( _0{} )));
// By default, uses cta_tile = Shape<_64,_32>
auto tma = test_tma_load<uint16_t>(gmem_layout, smem_layout);
// Check the TMA rank
EXPECT_EQ(rank(tma.get_tma_tensor(shape(gmem_layout))(0)), 2);
}
// Coalesce many modes and account for stride-0 modes within the TMA tile
{
Layout gmem_layout = make_layout(make_shape (make_shape (_32{},_4{}, 4), _32{}, make_shape (_4{}, 4)),
make_stride(make_stride(_16{},_4{}, 2048), _0{}, make_stride(_1{}, _512{})));
auto smem_layout = make_layout(make_shape (make_shape (_32{},_4{} ), _32{}, make_shape (_4{} )),
make_stride(make_stride(_16{},_4{} ), _0{}, make_stride(_1{} )));
// By default, uses cta_tile = Shape<_128,_32,_4>
auto tma = test_tma_load<int8_t>(gmem_layout, smem_layout);
// Check the TMA rank (Could be 3 instead of 4 with even better coalescing...?)
EXPECT_EQ(rank(tma.get_tma_tensor(shape(gmem_layout))(0)), 4);
}
}
TEST(SM90_CuTe_Hopper, Tma_Load_InternalType)
{
Layout smem_layout = Layout<Shape<_32,_32>, Stride<_1,_32>>{};
Layout gmem_layout = make_layout(make_shape(64, 64));
// Downcasted tensors to smaller TmaTypes
{
test_tma_load<int8_t, uint8_t>(gmem_layout, smem_layout);
test_tma_load<half_t, uint8_t>(gmem_layout, smem_layout);
test_tma_load< float, uint8_t>(gmem_layout, smem_layout);
test_tma_load<double, uint8_t>(gmem_layout, smem_layout);
}
// Upcasted tensors to larger TmaTypes
{
test_tma_load<int8_t, uint64_t>(gmem_layout, smem_layout);
test_tma_load<half_t, uint64_t>(gmem_layout, smem_layout);
test_tma_load< float, uint64_t>(gmem_layout, smem_layout);
test_tma_load<double, uint64_t>(gmem_layout, smem_layout);
}
// Complex<double> is 128bit, which the TMA has no concept of
{
test_tma_load<complex<double>, uint64_t>(gmem_layout, smem_layout);
test_tma_load<complex<double>, uint32_t>(gmem_layout, smem_layout);
}
}
#endif
| test/unit/cute/hopper/tma_load.cu/0 | {
"file_path": "test/unit/cute/hopper/tma_load.cu",
"repo_id": "test",
"token_count": 8354
} | 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/gemm/warp/mma_tensor_op_sm70.h"
#include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_warp_FragmentIterator, mma_f16_64x64x4) {
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> accumulator_tensor({Shape::kM, Shape::kN});
cutlass::reference::host::TensorFill(accumulator_tensor.host_view(), ElementC(-1));
for (int tid = 0; tid < 1; ++tid) {
typename MmaTensorOp::IteratorC::Fragment accumulator_tile;
CUTLASS_PRAGMA_UNROLL
for (size_t i = 0; i < accumulator_tile.size(); ++i) {
accumulator_tile[i] = static_cast<ElementC>(int(i));
}
using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
cutlass::gemm::GemmShape<64, 64, 4>,
cutlass::gemm::GemmShape<32, 32, 4>,
cutlass::half_t,
cutlass::layout::RowMajor
>;
FragmentIterator frag_iterator(accumulator_tile);
typename FragmentIterator::Fragment frag;
for (int iter = 0; iter < FragmentIterator::kIterations; ++iter) {
frag_iterator.load(frag);
++frag_iterator;
#if 0
std::cout << "T" << tid << ": ";
for (size_t i = 0; i < frag.size(); ++i) {
std::cout << " " << frag[i];
}
std::cout << std::endl;
#endif
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_warp_FragmentIterator, mma_f32_64x64x4) {
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using LayoutC = cutlass::layout::RowMajor;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
cutlass::HostTensor<ElementC, LayoutC> accumulator_tensor({Shape::kM, Shape::kN});
cutlass::reference::host::TensorFill(accumulator_tensor.host_view(), ElementC(-1));
for (int tid = 0; tid < 1; ++tid) {
typename MmaTensorOp::IteratorC::Fragment accumulator_tile;
CUTLASS_PRAGMA_UNROLL
for (size_t i = 0; i < accumulator_tile.size(); ++i) {
accumulator_tile[i] = static_cast<ElementC>(i);
}
typename MmaTensorOp::IteratorC iterator_C(accumulator_tensor.host_ref(), tid);
iterator_C.store(accumulator_tile);
}
/*
std::ofstream output("volta_mma_f32_64x64x4.csv");
output << accumulator_tensor.host_view() << std::endl;
*/
for (int tid = 0; tid < 1; ++tid) {
typename MmaTensorOp::IteratorC::Fragment accumulator_tile;
using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
cutlass::gemm::GemmShape<64, 64, 4>,
cutlass::gemm::GemmShape<32, 32, 4>,
ElementC,
LayoutC
>;
FragmentIterator frag_iterator(accumulator_tile);
for (int iter = 0; iter < FragmentIterator::kIterations; ++iter) {
typename FragmentIterator::Fragment frag;
frag_iterator.load(frag);
++frag_iterator;
#if 0
std::cout << "Iteration: " << iter << " - T" << tid << ": ";
for (int i = 0; i < frag.size(); ++i) {
std::cout << " " << frag[i];
}
std::cout << std::endl;
#endif
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/warp/fragment_iterator_volta_tensor_op.cu/0 | {
"file_path": "test/unit/epilogue/warp/fragment_iterator_volta_tensor_op.cu",
"repo_id": "test",
"token_count": 2712
} | 62 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Testbed and host reference for EVT unittest
*/
#pragma once
#include "gemm_testbed_3x.hpp"
namespace test {
namespace gemm {
namespace device {
/// Host-side tapply, tapply in cute is HOST_DEVICE
template <class T, class F, class G, int... I>
constexpr auto
tapply(T&& t, F&& f, G&& g, cute::seq<I...>)
{
return g(f(std::get<I>(static_cast<T&&>(t)))...);
}
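/// Illustrative sketch (not used by the tests; the tuple and lambdas are made up for this
/// comment): apply `f` to every element of a tuple and fold the transformed values with `g`.
///   auto t = std::make_tuple(1, 2, 3);
///   auto sum_of_squares = tapply(t,
///     [](int x) { return x * x; },          // f: per-element transform
///     [](auto... v) { return (v + ...); },  // g: fold of the transformed pack
///     cute::seq<0, 1, 2>{});                // yields 1 + 4 + 9 == 14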
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT: Base class for EVT Node
template <
typename Gemm_
>
class HostEVTNodeBase {
public:
using Gemm = Gemm_;
using TestBedImpl = typename detail::TestbedImpl<Gemm, cutlass::epilogue::thread::Identity, true>;
using Kernel = typename Gemm::GemmKernel;
using Epilogue = typename Kernel::CollectiveEpilogue;
using ElementCompute = typename TestBedImpl::ElementCompute;
using ElementScalar = typename TestBedImpl::ElementScalar;
using ElementAccumulator = typename Kernel::ElementAccumulator;
using ElementC = typename Kernel::ElementC;
using ElementD = typename Kernel::ElementD;
using LayoutTagC = typename TestBedImpl::LayoutTagC;
using LayoutTagD = typename TestBedImpl::LayoutTagD;
private:
bool _check_relative_equality;
// Factors used for calculating relative equality. These default
// values are borrowed from those used by default in the CUTLASS
// profiler for performing relative equality checks.
float _epsilon = 0.05f;
float _nonzero_floor = 1.0f / 256.0f;
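  // Rough sketch of the relative check performed by TensorRelativelyEquals below: two values are
  // accepted when their difference is small relative to their magnitudes (scaled by _epsilon),
  // while values whose magnitudes fall under _nonzero_floor are compared against an absolute
  // threshold instead. See cutlass::reference::host::TensorRelativelyEquals for the exact rule.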
public:
HostEVTNodeBase(){}
HostEVTNodeBase(bool check_relative_equality):
_check_relative_equality(check_relative_equality) { }
template <
class Element,
class Layout
>
bool equality_check(
cutlass::TensorView<Element, Layout> const& lhs,
cutlass::TensorView<Element, Layout> const& rhs) const {
if (_check_relative_equality) {
return cutlass::reference::host::TensorRelativelyEquals(
lhs, rhs, Element(_epsilon), Element(_nonzero_floor)
);
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
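  // Default hooks: derived nodes that own the C/D tensors (e.g. HostAuxLoad with isC == true,
  // HostAuxStore with isD == true) or that need result verification override these.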
void* get_tensor_C_ptr() {
return nullptr;
}
void* get_tensor_D_ptr() {
return nullptr;
}
bool compare_reference(std::stringstream& error_ss) {
return true;
}
};
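// The node classes below each model one device-side EVT node on the host: visit() evaluates the
// node for a single output element at (m + m_b, n + n_b, l), and get_arguments() mirrors the
// argument struct of the matching device epilogue visitor (that correspondence is assumed from
// the surrounding testbed rather than spelled out here).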
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Accumulator
template <
typename Gemm
>
class HostAccumulator: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementAccumulator = typename Base::ElementAccumulator;
using ElementCompute = typename Base::ElementCompute;
struct Arguments { };
private:
cutlass::NumericConverter<ElementCompute, ElementAccumulator> accumulator_converter;
public:
HostAccumulator(){}
template<typename ProblemShapeType>
HostAccumulator(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
:Base(check_relative_equality) {}
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
return accumulator_converter(acc);
}
Arguments get_arguments() {
return Arguments{};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Scalar Broadcast
template <
typename Gemm,
int Value,
int BroadcastCount = 1,
template <class> class ReductionFn = cutlass::multiplies
>
class HostScalarBroadcast : public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementCompute = typename Base::ElementCompute;
struct Arguments {
ElementCompute scalar[BroadcastCount] = {0};
ElementCompute const* scalar_ptrs[BroadcastCount] = { nullptr };
cute::Stride<cute::_0,cute::_0,cute::_0> dScalar{};
};
private:
ElementCompute _scalar{};
public:
HostScalarBroadcast(){}
template<typename ProblemShapeType, typename TestBedImpl>
HostScalarBroadcast(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
: Base(check_relative_equality), _scalar(ElementCompute(Value)) {}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
return _scalar;
}
bool compare_reference(std::stringstream& error_ss) {
error_ss << "Scalar: " << float(_scalar) << "\n\n";
return true;
}
Arguments get_arguments() {
if constexpr (BroadcastCount == 1)
return Arguments{{_scalar}, {nullptr}};
else if constexpr (BroadcastCount == 2)
return Arguments{{_scalar, _scalar}, {nullptr, nullptr}};
else if constexpr (BroadcastCount == 3)
return Arguments{{_scalar, _scalar, _scalar}, {nullptr, nullptr, nullptr}};
else
return Arguments{{_scalar}, {nullptr}};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Row Broadcast
template <
typename Gemm,
typename ElementBias_=void
>
class HostRowBroadcast: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementBias = std::conditional_t<std::is_void_v<ElementBias_>,
typename Base::ElementC,
ElementBias_>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
struct Arguments {
ElementBias const* ptr_row = nullptr;
ElementBias null_default = ElementBias(0);
cute::Stride<cute::_0,cute::_1,cute::_0> dRow = {};
};
private:
cutlass::NumericConverter<ElementCompute, ElementBias> _bias_converter;
cutlass::HostTensor<ElementBias, LayoutTagVector> _bias;
int _N;
TestBedImpl impl_;
public:
HostRowBroadcast(){}
template<typename ProblemShapeType>
HostRowBroadcast(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
: Base(check_relative_equality), impl_(impl) {
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
_N = cute::get<1>(problem_shape_MNKL);
_bias.resize(cutlass::Coord<1>(_N));
EXPECT_TRUE(
detail::initialize_tensor(
_bias.host_view(), cutlass::Distribution::Uniform,
impl_.collective_mma_inputs.seed + 2023
)
);
_bias.sync_device();
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
auto TensorBias = cute::make_tensor(_bias.host_data(),
cute::make_layout(cute::make_shape(cute::_1{}, _N)));
return _bias_converter(TensorBias(1, n + n_b));
}
bool compare_reference(std::stringstream& error_ss) {
error_ss
<< "PerColumnBias = \n" << _bias.host_view() << "\n\n";
return true;
}
Arguments get_arguments() {
return {_bias.device_data()};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Column Broadcast
template <
typename Gemm,
typename ElementBias_=void
>
class HostColBroadcast: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementBias = std::conditional_t<std::is_void_v<ElementBias_>,
typename Base::ElementC,
ElementBias_>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
struct Arguments {
ElementBias const* ptr_row = nullptr;
ElementBias null_default = ElementBias(0);
cute::Stride<cute::_1,cute::_0,cute::_0> dRow = {};
};
private:
cutlass::NumericConverter<ElementCompute, ElementBias> _bias_converter;
cutlass::HostTensor<ElementBias, LayoutTagVector> _bias;
int _M;
TestBedImpl impl_;
public:
HostColBroadcast(){}
template<typename ProblemShapeType>
HostColBroadcast(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
: Base(check_relative_equality), impl_(impl) {
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
_M = cute::get<0>(problem_shape_MNKL);
_bias.resize(cutlass::Coord<1>(_M));
EXPECT_TRUE(
detail::initialize_tensor(
_bias.host_view(), cutlass::Distribution::Uniform,
impl_.collective_mma_inputs.seed + 2023
)
);
_bias.sync_device();
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
auto TensorBias = cute::make_tensor(_bias.host_data(),
cute::make_layout(cute::make_shape(_M, cute::_1{})));
return _bias_converter(TensorBias(m + m_b, 1));
}
bool compare_reference(std::stringstream& error_ss) {
error_ss
<< "PerRowBias = \n" << _bias.host_view() << "\n\n";
return true;
}
Arguments get_arguments() {
return {_bias.device_data()};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Aux Load
template <
typename Gemm,
bool isC=false,
typename ElementAuxLoad_=void,
typename LayoutTagAux_=void
>
class HostAuxLoad: public HostEVTNodeBase<Gemm> {
public:
using ElementAuxLoad = std::conditional_t<std::is_void_v<ElementAuxLoad_>,
typename HostEVTNodeBase<Gemm>::ElementC,
ElementAuxLoad_>;
using LayoutTagAux = std::conditional_t<std::is_void_v<LayoutTagAux_>,
typename HostEVTNodeBase<Gemm>::LayoutTagC,
LayoutTagAux_>;
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using StrideAux = cutlass::gemm::TagToStrideC_t<LayoutTagAux>;
struct Arguments_Aux {
ElementAuxLoad const *ptr_aux = nullptr;
ElementAuxLoad null_default = ElementAuxLoad(0);
StrideAux dAux = {};
};
struct Arguments_C {};
using Arguments = cute::conditional_t<isC, Arguments_C, Arguments_Aux>;
private:
cutlass::NumericConverter<ElementCompute, ElementAuxLoad> _aux_load_converter;
cutlass::HostTensor<ElementAuxLoad, LayoutTagAux> _tensor_aux_load;
int _M, _N, _L;
TestBedImpl impl_;
StrideAux _stride_aux;
public:
HostAuxLoad(){}
template<typename ProblemShapeType>
HostAuxLoad(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
: Base(check_relative_equality), impl_(impl) {
    auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
    // Assign the members directly: a structured binding named [_M, _N, K, _L] would introduce
    // locals that shadow the members, leaving the _M/_N/_L used in visit() uninitialized.
    _M = cute::get<0>(problem_shape_MNKL);
    _N = cute::get<1>(problem_shape_MNKL);
    _L = cute::get<3>(problem_shape_MNKL);
    auto aux_coord = cutlass::make_Coord(_M * _L, _N);
_tensor_aux_load.resize(
aux_coord,
cutlass::layout::Affine2Layout_Factory<LayoutTagAux>::layout_factory(
aux_coord, typename LayoutTagAux::Stride()
)
);
EXPECT_TRUE(
detail::initialize_tensor(
_tensor_aux_load.host_view(),
cutlass::Distribution::Uniform,
impl_.collective_mma_inputs.seed + 2023
)
);
_tensor_aux_load.sync_device();
_stride_aux = cutlass::make_cute_packed_stride(StrideAux{}, cute::make_shape(_M, _N, _L));
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
auto TensorAuxLoad = cute::make_tensor(_tensor_aux_load.host_data(),
cute::make_layout(cute::make_shape(_M, _N, _L), _stride_aux));
return _aux_load_converter(TensorAuxLoad(m + m_b, n + n_b, l));
}
bool compare_reference(std::stringstream& error_ss) {
if constexpr (!isC) {
error_ss
<< "AuxLoad = \n" << _tensor_aux_load.host_view()<< "\n\n";
}
return true;
}
void* get_tensor_C_ptr() {
if constexpr (isC) {
return static_cast<void*>(_tensor_aux_load.device_data());
} else {
return nullptr;
}
}
Arguments get_arguments() {
if constexpr (isC)
return {};
else
return {_tensor_aux_load.device_data(), ElementAuxLoad(0), _stride_aux};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Compute
template<typename T>
T* findNonNullPtr(T* first_ptr) {
return first_ptr;
}
template <typename T, typename... Args>
T* findNonNullPtr(T* first_ptr, Args... args) {
if (first_ptr) {
return first_ptr;
}
return findNonNullPtr(args...);
}
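// Illustrative use (pointers made up for this comment): returns the first non-null pointer
// among its arguments.
//   int x = 0;
//   int* a = nullptr;
//   int* b = &x;
//   int* p = findNonNullPtr(a, b);  // p == b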
template <
typename Gemm,
template <class> class ComputeOp_
>
class HostCompute: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementCompute = typename Base::ElementCompute;
using ComputeOp = ComputeOp_<ElementCompute>;
struct Arguments {
struct OpArgs {} op;
};
private:
ComputeOp _op;
public:
HostCompute(){}
template <typename ProblemShapeType, typename TestBedImpl>
HostCompute(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality) { }
template <class ElementAccumulator, typename... Args>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc, Args... frg_inputs) {
return _op(frg_inputs...);
}
Arguments get_arguments(){
return {};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Unary Compute
template <
typename Gemm,
template <class> class ComputeOp_,
typename Child0
>
class HostUnaryCompute: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementCompute = typename Base::ElementCompute;
using ComputeOp = ComputeOp_<ElementCompute>;
struct Arguments {
typename Child0::Arguments child_0_args;
struct OpArgs {} op;
};
private:
ComputeOp _op;
Child0 _child_0;
public:
HostUnaryCompute(){}
template <typename ProblemShapeType, typename TestBedImpl>
HostUnaryCompute(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality),
_child_0(problem_size, impl, check_relative_equality) { }
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
ElementCompute child_0_result = _child_0.visit(m, n, l, m_b, n_b, acc);
return _op(child_0_result);
}
Arguments get_arguments(){
return {
_child_0.get_arguments(),
{},
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Aux Store
template <
typename Gemm,
bool isD=false,
class ElementAuxStore_=void,
typename LayoutTagAux_=void
>
class HostAuxStore: public HostEVTNodeBase<Gemm> {
public:
using ElementAuxStore = std::conditional_t<std::is_void_v<ElementAuxStore_>,
typename HostEVTNodeBase<Gemm>::ElementD,
ElementAuxStore_>;
using LayoutTagAux = std::conditional_t<std::is_void_v<LayoutTagAux_>,
typename HostEVTNodeBase<Gemm>::LayoutTagD,
LayoutTagAux_>;
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using StrideAux = cutlass::gemm::TagToStrideC_t<LayoutTagAux>;
struct Arguments_Aux {
struct OpArgs {
ElementAuxStore* ptr_aux = nullptr;
StrideAux dAux = {};
} op;
};
struct Arguments_D {};
using Arguments = cute::conditional_t<isD, Arguments_D, Arguments_Aux>;
private:
cutlass::NumericConverter<ElementAuxStore, ElementCompute> destination_converter;
cutlass::HostTensor<ElementAuxStore, LayoutTagAux> _tensor_aux_store;
cutlass::HostTensor<ElementAuxStore, LayoutTagAux> _reference_aux_store;
int _M, _N, _L;
TestBedImpl impl_;
StrideAux _stride_aux;
public:
HostAuxStore(){}
template <typename ProblemShapeType>
HostAuxStore(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality),
impl_(impl) {
    auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
    // Assign the members directly: a structured binding named [_M, _N, K, _L] would introduce
    // locals that shadow the members, leaving the _M/_N/_L used in visit() uninitialized.
    _M = cute::get<0>(problem_shape_MNKL);
    _N = cute::get<1>(problem_shape_MNKL);
    _L = cute::get<3>(problem_shape_MNKL);
    auto aux_coord = cutlass::make_Coord(_M * _L, _N);
_tensor_aux_store.resize(
aux_coord,
cutlass::layout::Affine2Layout_Factory<LayoutTagAux>::layout_factory(
aux_coord, typename LayoutTagAux::Stride()
)
);
_reference_aux_store.resize(
aux_coord,
cutlass::layout::Affine2Layout_Factory<LayoutTagAux>::layout_factory(
aux_coord, typename LayoutTagAux::Stride()
)
);
_tensor_aux_store.sync_device();
_stride_aux = cutlass::make_cute_packed_stride(StrideAux{}, cute::make_shape(_M, _N, _L));
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc, ElementCompute child_0_result) {
auto TensorAuxStore = cute::make_tensor(static_cast<ElementAuxStore*>(_reference_aux_store.host_data()),
cute::make_layout(cute::make_shape(_M, _N, _L), _stride_aux));
TensorAuxStore(m + m_b, n + n_b, l) = destination_converter(child_0_result);
return child_0_result;
}
bool compare_reference(std::stringstream& error_ss) {
// Verify the store node
_tensor_aux_store.sync_host();
bool equal = this->equality_check(_reference_aux_store.host_view(), _tensor_aux_store.host_view());
if (!equal) {
error_ss
<< "\n\nReference =\n" << _reference_aux_store.host_view()
<< "\n\nComputed =\n" << _tensor_aux_store.host_view() << "\n\n";
}
return equal;
}
void* get_tensor_D_ptr() {
if constexpr (isD)
return static_cast<void*>(_tensor_aux_store.device_data());
else
return nullptr;
}
Arguments get_arguments() {
if constexpr (isD) {
return {};
} else {
return {_tensor_aux_store.device_data(), _stride_aux};
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Row Reduce
template <
typename Gemm,
template <class> class ReduceFn,
typename ElementReduce
>
class HostRowReduce: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using ElementOutput = typename Base::ElementD;
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
struct Arguments {
struct OpArgs {
ElementReduce* ptr_row = nullptr;
ElementCompute reduce_identity = 0;
cute::Stride<cute::_0, cute::_1, cute::_0> dRow = {};
} op;
};
private:
cutlass::NumericConverter<ElementReduce, ElementCompute> destination_converter;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _tensor_row_reduce;
cutlass::HostTensor<ElementCompute, LayoutTagVector> _reduce_buffer;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _reference_row_reduce;
int _N;
TestBedImpl impl_;
ReduceFn<ElementCompute> reduce_fn;
public:
HostRowReduce(){}
template <typename ProblemShapeType>
HostRowReduce(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality),
impl_(impl) {
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
_N = cute::get<1>(problem_shape_MNKL);
_tensor_row_reduce.resize(cutlass::Coord<1>(_N));
_reference_row_reduce.resize(cutlass::Coord<1>(_N));
_reduce_buffer.resize(cutlass::Coord<1>(_N));
_tensor_row_reduce.sync_device();
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc, ElementCompute child_0_result) {
auto TensorRowReduce = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(cute::_1{}, _N)));
TensorRowReduce(1, n + n_b) = reduce_fn(TensorRowReduce(1, n + n_b), child_0_result);
return child_0_result;
}
bool compare_reference(std::stringstream& error_ss) {
// Verify the store node
_tensor_row_reduce.sync_host();
auto TensorRowReduce = cute::make_tensor(_reference_row_reduce.host_data(),
cute::make_layout(cute::make_shape(cute::_1{}, _N)));
auto TensorReduceBuffer = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(cute::_1{}, _N)));
// Filling the reference tensor with the reduce buffer
for (int n = 0; n < _N; n ++) {
TensorRowReduce(1, n) = destination_converter(TensorReduceBuffer(1, n));
}
bool equal = this->equality_check(_reference_row_reduce.host_view(), _tensor_row_reduce.host_view());
if (!equal) {
error_ss
<< "\n\nRow Reduce Reference =\n" << _reference_row_reduce.host_view()
<< "\n\nRow Reduce Computed =\n" << _tensor_row_reduce.host_view() << "\n\n";
}
return equal;
}
Arguments get_arguments() {
return {_tensor_row_reduce.device_data()};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Column Reduce
template <
typename Gemm,
template <class> class ReduceFn,
typename ElementReduce
>
class HostColumnReduce: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using ElementOutput = typename Base::ElementD;
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
struct Arguments {
struct OpArgs {
ElementReduce* ptr_col = nullptr;
ElementCompute reduce_identity = 0;
cute::Stride<cute::_1, cute::_0, cute::_0> dRow = {};
} op;
};
private:
cutlass::NumericConverter<ElementReduce, ElementCompute> destination_converter;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _tensor_column_reduce;
cutlass::HostTensor<ElementCompute, LayoutTagVector> _reduce_buffer;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _reference_column_reduce;
int _M;
TestBedImpl impl_;
ReduceFn<ElementCompute> reduce_fn;
public:
HostColumnReduce(){}
template <typename ProblemShapeType>
HostColumnReduce(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality),
impl_(impl) {
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
_M = cute::get<0>(problem_shape_MNKL);
_tensor_column_reduce.resize(cutlass::Coord<1>(_M));
_reference_column_reduce.resize(cutlass::Coord<1>(_M));
_reduce_buffer.resize(cutlass::Coord<1>(_M));
_tensor_column_reduce.sync_device();
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc, ElementCompute child_0_result) {
auto TensorColReduce = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(_M, cute::_1{})));
TensorColReduce(m + m_b, 0) = reduce_fn(TensorColReduce(m + m_b, 0), child_0_result);
return child_0_result;
}
bool compare_reference(std::stringstream& error_ss) {
// Verify the store node
_tensor_column_reduce.sync_host();
auto TensorColReduce = cute::make_tensor(_reference_column_reduce.host_data(),
cute::make_layout(cute::make_shape(_M, cute::_1{})));
auto TensorReduceBuffer = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(_M, cute::_1{})));
// Filling the reference tensor with the reduce buffer
for (int m = 0; m < _M; m ++) {
TensorColReduce(m, 0) = destination_converter(TensorReduceBuffer(m, 0));
}
bool equal = this->equality_check(_reference_column_reduce.host_view(), _tensor_column_reduce.host_view());
if (!equal) {
error_ss
<< "\n\nColumn Reduce Reference =\n" << _reference_column_reduce.host_view()
<< "\n\nColumn Reduce Computed =\n" << _tensor_column_reduce.host_view() << "\n\n";
}
return equal;
}
Arguments get_arguments() {
return {_tensor_column_reduce.device_data()};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Scalar Reduce
template <
typename Gemm,
template <class> class ReduceFn,
typename ElementReduce
>
class HostScalarReduce: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using ElementOutput = typename Base::ElementD;
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
struct Arguments {
struct OpArgs {
ElementReduce* ptr_scalar = nullptr;
ElementCompute reduce_identity = 0;
cute::Stride<cute::_0, cute::_0, cute::_0> dScalar = {};
} op;
};
private:
cutlass::NumericConverter<ElementReduce, ElementCompute> destination_converter;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _tensor_scalar_reduce;
cutlass::HostTensor<ElementCompute, LayoutTagVector> _reduce_buffer;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _reference_scalar_reduce;
ReduceFn<ElementCompute> reduce_fn;
TestBedImpl impl_;
public:
HostScalarReduce(){}
template <typename ProblemShapeType>
HostScalarReduce(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality),
impl_(impl) {
_tensor_scalar_reduce.resize(cutlass::Coord<1>(1));
_reference_scalar_reduce.resize(cutlass::Coord<1>(1));
_reduce_buffer.resize(cutlass::Coord<1>(1));
_tensor_scalar_reduce.sync_device();
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc, ElementCompute child_0_result) {
auto TensorRowReduce = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(cute::_1{})));
TensorRowReduce(0) = reduce_fn(TensorRowReduce(0), child_0_result);
return child_0_result;
}
bool compare_reference(std::stringstream& error_ss) {
// Verify the store node
_tensor_scalar_reduce.sync_host();
auto TensorRowReduce = cute::make_tensor(_reference_scalar_reduce.host_data(),
cute::make_layout(cute::make_shape(cute::_1{})));
auto TensorReduceBuffer = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(cute::_1{})));
// Filling the reference tensor with the reduce buffer
TensorRowReduce(0) = destination_converter(TensorReduceBuffer(0));
bool equal = this->equality_check(_reference_scalar_reduce.host_view(), _tensor_scalar_reduce.host_view());
if (!equal) {
error_ss
<< "\n\nScalar Reduce Reference =\n" << _reference_scalar_reduce.host_view()
<< "\n\nScalar Reduce Computed =\n" << _tensor_scalar_reduce.host_view() << "\n\n";
}
return equal;
}
Arguments get_arguments() {
return {_tensor_scalar_reduce.device_data()};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Host EVT wrapper
/// The ArgumentPack is used to model the alignment when num ops <= 4
template <typename... Ops>
struct ArgumentPack;
template <typename T>
struct ArgumentPack<T> {
T arg;
ArgumentPack(T first):
arg(first) {}
};
template <typename First, typename... Rest>
struct ArgumentPack<First, Rest...> {
First arg;
ArgumentPack<Rest...> rest_args;
ArgumentPack(First first, Rest... rest) :
arg(first), rest_args(rest...) {}
};
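// For illustration: ArgumentPack<A, B, C> nests as { A arg; { B arg; { C arg; } } },
// keeping members in op order so the pack mirrors the layout of the fused epilogue's
// thread arguments when the op count is small (see `cond` in HostVisitorBase below).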
/// Base class for Host Visitor
template <typename Gemm, class... Ops>
struct HostVisitorBase: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementCompute = typename Base::ElementCompute;
using Arguments_struct = ArgumentPack<typename Ops::Arguments...>;
using Arguments_tuple = cute::tuple<typename Ops::Arguments...>;
constexpr static int Rm1 = sizeof...(Ops);
constexpr static bool cond = Rm1 > 4;
using Arguments = cute::conditional_t<cond, Arguments_tuple, Arguments_struct>;
std::tuple<Ops...> ops;
HostVisitorBase(){}
template<typename ProblemShapeType, typename TestBedImpl>
HostVisitorBase(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
:Base(check_relative_equality),
ops(test::gemm::device::tapply(std::tuple<Ops...>{},
[&] (auto&& op) {
using Op = cute::remove_cvref_t<decltype(op)>;
return Op(problem_size, impl, check_relative_equality);
},
[] (auto&&... _ops) {
return std::make_tuple(_ops...);
},
cute::make_seq<Rm1>{}
)){ }
bool compare_reference(std::stringstream& error_ss) {
return cute::detail::tapply(ops,
[&](auto& op) {
return op.compare_reference(error_ss);
},
[&] (auto&&... inputs) {
return arrayAnd(inputs...);
},
cute::make_seq<Rm1>{}
);
}
void* get_tensor_C_ptr() {
return cute::detail::tapply(ops,
[&](auto& op) {
return op.get_tensor_C_ptr();
},
[&] (auto&&... inputs) {
return findNonNullPtr(inputs...);
},
cute::make_seq<Rm1>{}
);
}
void* get_tensor_D_ptr() {
return cute::detail::tapply(ops,
[&](auto& op) {
return op.get_tensor_D_ptr();
},
[&] (auto&&... inputs) {
return findNonNullPtr(inputs...);
},
cute::make_seq<Rm1>{}
);
}
Arguments get_arguments() {
return test::gemm::device::tapply(ops,
[&](auto& op) {
return op.get_arguments();
},
[&] (auto&&... args) {
if constexpr (Rm1 > 4) {
return cute::make_tuple(args...);
} else {
return Arguments(args...);
}
},
cute::make_seq<Rm1>{}
);
}
bool arrayAnd(bool passed) {
return passed;
}
template <typename... Args>
bool arrayAnd(bool first_passed, Args... passed) {
if (first_passed) {
return arrayAnd(passed...);
}
return first_passed;
}
};
/// Tree-struct visitor
template <class NodeOp, class... ChildOps>
struct HostTreeVisitor: public HostVisitorBase<typename NodeOp::Base::Gemm, ChildOps..., NodeOp> {
public:
using Gemm = typename NodeOp::Base::Gemm;
using Base = HostVisitorBase<Gemm, ChildOps..., NodeOp>;
using ElementCompute = typename Base::ElementCompute;
using Arguments = typename Base::Arguments;
constexpr static int Rm1 = sizeof...(ChildOps);
HostTreeVisitor(){}
template<typename ProblemShapeType, typename TestBedImpl>
HostTreeVisitor(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
:Base(problem_size, impl, check_relative_equality){ }
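// Evaluates the tree at one output coordinate: each ChildOp::visit is applied to the
// accumulator first, and the results are forwarded, in order, as the trailing
// arguments of NodeOp::visit (the node op is stored last in `ops`).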
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
return cute::detail::tapply(this->ops,
[&] (auto& op) {
return op.visit(m, n, l, m_b, n_b, acc);
},
[&] (auto&&... frg_inputs) {
return std::get<Rm1>(this->ops).visit(m, n, l, m_b, n_b, acc, frg_inputs...);
},
cute::make_seq<Rm1>{}
);
}
};
/// General Graph visitor
template <class Gemm, class EdgeTuple, class... Ops>
struct HostTopoVisitor: public HostVisitorBase<Gemm, Ops...> {
public:
using Base = HostVisitorBase<Gemm, Ops...>;
using ElementCompute = typename Base::ElementCompute;
constexpr static int Rm1 = Base::Rm1;
using Arguments = typename Base::Arguments;
private:
ElementCompute frg_outputs[Rm1];
public:
HostTopoVisitor(){}
template<typename ProblemShapeType, typename TestBedImpl>
HostTopoVisitor(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
:Base(problem_size, impl, check_relative_equality) { }
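// EdgeTuple encodes the DAG: cute::get<I>(EdgeTuple{}) holds the indices of the
// previously evaluated ops whose outputs feed op I, so Ops... must be listed in a
// valid topological order. visit_() walks the ops in that order, caching each
// result in frg_outputs.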
template<class ElementAccumulator, int I>
ElementCompute visit_(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
frg_outputs[I] = cute::transform_apply(cute::get<I>(EdgeTuple{}),
[&] (auto&& _E) {
constexpr int e = cute::remove_cvref_t<decltype(_E)>::value;
return frg_outputs[e];
},
[&] (auto const&... frg_inputs) {
ElementCompute res = std::get<I>(this->ops).visit(m, n, l, m_b, n_b, acc, frg_inputs...);
return res;
}
);
if constexpr (I < Rm1 - 1) {
return visit_<ElementAccumulator, I+1>(m, n, l, m_b, n_b, acc);
} else {
return frg_outputs[I];
}
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
return visit_<ElementAccumulator, 0>(m, n, l, m_b, n_b, acc);
}
};
/// SplitTree visitor
template <class Gemm, class InputTree, class OutputTree, class... AuxOutTrees>
struct HostSplitTreeVisitor: public HostVisitorBase<Gemm, InputTree, AuxOutTrees..., OutputTree> {
public:
using Base = HostVisitorBase<Gemm, InputTree, AuxOutTrees..., OutputTree>;
using ElementCompute = typename Base::ElementCompute;
using Arguments = typename Base::Arguments;
constexpr static int Rm2 = sizeof...(AuxOutTrees);
private:
ElementCompute frg_input;
public:
HostSplitTreeVisitor(){}
template<typename ProblemShapeType, typename TestBedImpl>
HostSplitTreeVisitor(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
:Base(problem_size, impl, check_relative_equality) { }
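// Evaluation order per coordinate: the input tree is computed once, its result is
// broadcast to every auxiliary output tree, and the same value then feeds the
// output tree, whose result is returned.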
template<class ElementAccumulator, int I>
void visitAux(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator frag) {
std::get<I+1>(this->ops).visit(m, n, l, m_b, n_b, frag);
if constexpr (I < Rm2 - 1) {
return visitAux<ElementAccumulator, I+1>(m, n, l, m_b, n_b, frag);
} else {
return;
}
}
template<class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
/// Compute the input tree
frg_input = std::get<0>(this->ops).visit(m, n, l, m_b, n_b, acc);
/// Compute the aux out tree
visitAux<ElementAccumulator, 0>(m, n, l, m_b, n_b, frg_input);
/// Visit the output tree
return std::get<Rm2+1>(this->ops).visit(m, n, l, m_b, n_b, frg_input);
}
};
/// Universal testbed for EVT
template <class Gemm, typename EVT>
class Testbed3xEVT {
public:
// The EVT Module to test
using EVTModule = typename EVT::EVTModule;
using TestBedImpl = typename detail::TestbedImpl<Gemm, cutlass::epilogue::thread::Identity, true>;
using Kernel = typename Gemm::GemmKernel;
using Epilogue = typename Gemm::GemmKernel::CollectiveEpilogue;
using ElementAccumulator = typename Kernel::ElementAccumulator;
using ElementC = typename Kernel::ElementC;
using ElementD = typename Kernel::ElementD;
using ProblemShapeType = typename Kernel::ProblemShape;
using LayoutTagA = typename TestBedImpl::LayoutTagA;
using LayoutTagB = typename TestBedImpl::LayoutTagB;
using LayoutTagC = typename TestBedImpl::LayoutTagC;
using LayoutTagD = typename TestBedImpl::LayoutTagD;
//
// Methods
//
Testbed3xEVT(
bool check_relative_equality_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = TestBedImpl::kDefaultSeed
) :
impl_((check_relative_equality_ ? CheckEquality::RELATIVE : CheckEquality::EXACT), ScalarLoc::ON_DEVICE, VectorBeta::ENABLED,
init_A_, init_B_, init_C_, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform, seed_),
check_relative_equality(check_relative_equality_) { }
Testbed3xEVT(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = TestBedImpl::kDefaultSeed
) :
impl_(CheckEquality::EXACT, ScalarLoc::ON_DEVICE, VectorBeta::ENABLED,
init_A_, init_B_, init_C_, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform, seed_),
check_relative_equality(false) { }
Testbed3xEVT(
typename LayoutTagA::Stride stride_factor_A_,
typename LayoutTagB::Stride stride_factor_B_,
typename LayoutTagC::Stride stride_factor_C_,
typename LayoutTagD::Stride stride_factor_D_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = TestBedImpl::kDefaultSeed
) :
impl_(stride_factor_A_, stride_factor_B_, stride_factor_C_, stride_factor_D_,
CheckEquality::EXACT, ScalarLoc::ON_DEVICE, VectorBeta::ENABLED,
init_A_, init_B_, init_C_, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform, seed_),
check_relative_equality(false) { }
/// Initializes data structures
void initialize(ProblemShapeType problem_size) {
//
// Allocate the GEMM workspace for A/B tensor
//
impl_.initialize(problem_size);
}
// Detail Implementation
TestBedImpl impl_;
// Whether to use relative equality checks
bool check_relative_equality;
bool verify(ProblemShapeType problem_size, EVTModule& host_reference) {
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::get<0>(problem_shape_MNKL);
auto N = cute::get<1>(problem_shape_MNKL);
auto K = cute::get<2>(problem_shape_MNKL);
auto L = cute::get<3>(problem_shape_MNKL);
auto A = cute::make_tensor(impl_.collective_mma_inputs.tensor_A.host_data(),
cute::make_layout(cute::make_shape(M, K, L), impl_.collective_mma_inputs.stride_a));
auto B = cute::make_tensor(impl_.collective_mma_inputs.tensor_B.host_data(),
cute::make_layout(cute::make_shape(N, K, L), impl_.collective_mma_inputs.stride_b));
auto LayoutD = cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_d);
cutlass::reference::host::GettMainloopParams<ElementAccumulator, decltype(A), decltype(B)> mainloop_params{A, B};
/// Reference Kernel
static int constexpr kBlockM = 64;
static int constexpr kBlockN = 64;
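// Host reference: compute each kBlockM x kBlockN accumulator tile with gett_mainloop,
// then replay the EVT epilogue element-by-element via host_reference.visit for every
// in-bounds (m + m_b, n + n_b) coordinate.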
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int64_t l = 0; l < cute::size<2>(mainloop_params.A.layout()); ++l) {
for (int64_t m = 0; m < cute::size<0>(mainloop_params.A.layout()); m += kBlockM) {
for (int64_t n = 0; n < cute::size<0>(mainloop_params.B.layout()); n += kBlockN) {
ElementAccumulator acc[kBlockM][kBlockN];
gett_mainloop(mainloop_params, m, n, l, acc);
/// Epilogue EVT
for (int n_b = 0; n_b < kBlockN; ++n_b) {
for (int m_b = 0; m_b < kBlockM; ++m_b) {
if (m + m_b < cute::size<0>(LayoutD) && n + n_b < cute::size<1>(LayoutD)) {
host_reference.visit(m, n, l, m_b, n_b, acc[m_b][n_b]);
}
}
}
}
}
}
std::stringstream error_ss;
bool passed = host_reference.compare_reference(error_ss);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< M << "x" << N << "x" << K << "x" << L << "_"
<< cute::get<0>(typename Gemm::GemmKernel::TileShape{}) << "_"
<< cute::get<1>(typename Gemm::GemmKernel::TileShape{}) << "_"
<< cute::get<2>(typename Gemm::GemmKernel::TileShape{}) << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << ' ' << M << "x" << N << "x" << K
<< ", Batch count = " << L << "\n\n";
file
<< "A =\n" << impl_.collective_mma_inputs.tensor_A.host_view()
<< "\nB =\n" << impl_.collective_mma_inputs.tensor_B.host_view()
<< "\nC =\n" << impl_.collective_epilogue.tensor_C.host_view() << "\n\n";
file << error_ss.str();
}
return passed;
}
bool run(
ProblemShapeType problem_size,
bool profiling = false,
int iterations = 20,
int splits = 1) {
// Fail test if insufficient CUDA device
if (!impl_.sufficient()) {
std::cout << "Test failed due to insufficient CUDA device." << std::endl;
return false;
}
//
// Initialize the Gemm operator
//
typename Gemm::Arguments arguments;
cutlass::KernelHardwareInfo hw_info;
hw_info.device_id = 0;
if (not profiling) {
impl_.sm_count = std::min(impl_.MaxSmCount, cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id));
hw_info.sm_count = impl_.sm_count;
}
else {
impl_.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
hw_info.sm_count = impl_.sm_count;
}
typename Gemm::GemmKernel::TileScheduler::Arguments scheduler_args;
if constexpr (cute::is_same_v<typename Gemm::GemmKernel::TileSchedulerTag, cutlass::gemm::StreamKScheduler>) {
scheduler_args = { splits };
}
/// Initializes data structures
/// A/B/C/D Tensor
initialize(problem_size);
/// Initialize the epilogue arguments
EVTModule host_reference(problem_size, impl_, check_relative_equality);
arguments = typename Gemm::Arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size,
{
impl_.collective_mma_inputs.tensor_A.device_data(), impl_.collective_mma_inputs.stride_a,
impl_.collective_mma_inputs.tensor_B.device_data(), impl_.collective_mma_inputs.stride_b
},
{ // Epilogue arguments
{}, // thread
static_cast<ElementC*>(host_reference.get_tensor_C_ptr()),
impl_.collective_epilogue.stride_c,
static_cast<ElementD*>(host_reference.get_tensor_D_ptr()),
impl_.collective_epilogue.stride_d
}, // Epilogue arguments end
hw_info,
scheduler_args
};
// Filling in the thread arguments
typename EVTModule::Arguments epilogue_args = host_reference.get_arguments();
std::memcpy(&arguments.epilogue.thread, &epilogue_args.arg, sizeof(epilogue_args.arg));
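// This byte-copy assumes EVTModule::Arguments mirrors the layout of the fused
// epilogue's thread arguments; get_arguments() packs the per-op Arguments in the
// same order in which the device-side EVT declares its nodes.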
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.can_implement(arguments);
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
//
// Run the GEMM
//
if (profiling) {
return impl_.profile(problem_size, iterations, gemm_op, arguments, workspace);
}
else {
cudaError_t result;
status = gemm_op.initialize(arguments, workspace.get());
status = gemm_op.run();
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync.";
return false;
}
}
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, host_reference);
if (!passed) {
std::cout << "Error : Failed \n";
}
return passed;
}
};
template <typename Gemm, typename EVT>
bool TestAllEVT(bool check_relative_equality=false) {
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
int max_alignment = std::max(Gemm::kAlignmentA, Gemm::kAlignmentB);
std::vector<int> problem_size_m = {max_alignment, 512 - 3 * max_alignment};
std::vector<int> problem_size_n = {max_alignment, 512 - 2 * max_alignment};
if constexpr (cute::is_same_v<typename Gemm::GemmKernel::DispatchPolicy::Schedule,
cutlass::gemm::KernelTmaWarpSpecializedPingpong>) {
problem_size_m.push_back(768);
problem_size_n.push_back(768);
}
constexpr int Stages = Gemm::GemmKernel::DispatchPolicy::Stages;
constexpr int TileShapeK = cute::size<2>(typename Gemm::GemmKernel::TileShape{});
std::vector<int> problem_size_k = {max_alignment, TileShapeK * (Stages + 1) - max_alignment};
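// The extents above probe the minimum legal alignment and, for K, a span of more than
// Stages tile iterations, which is intended to exercise both the steady-state and the
// predicated residual paths of the mainloop.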
Testbed3xEVT<Gemm, EVT> testbed(check_relative_equality);
bool passed = true;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
ProblemShapeType problem_size;
if constexpr (cute::rank(ProblemShapeType{}) == 4) {
problem_size = ProblemShapeType{m, n, k, /* l */ 1};
}
else {
problem_size = ProblemShapeType{m, n, k};
}
passed = testbed.run(problem_size);
if (!passed) {
return false;
}
}
}
}
// if we do support batched GEMM, just run one test on it to save on test time
if constexpr (cute::rank(ProblemShapeType{}) == 4) {
auto problem_size = ProblemShapeType{256 + max_alignment, 256 + max_alignment, 160 + max_alignment, /* l */ 3};
passed = testbed.run(
problem_size
);
if (!passed) {
return false;
}
}
return passed;
}
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
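// Each test below composes the same pieces: a TMA warp-specialized collective epilogue
// built with a ScaledLinCombPerRowBias* fusion, a collective mainloop from the SM90
// CollectiveBuilder, and a GemmUniversal kernel wrapped in GemmUniversalAdapter.
// Only the FP8 operand/output types, the cluster shape, and the kernel schedule vary.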
///////////////////////////////////////////////////////////////////////////////
//////////////////////////////// output: E4M3 /////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e5m2 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e5m2t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e5m2_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e5m2 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e5m2_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x2x1 //////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_2x2x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_2,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_2,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 1x4x1 //////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_1x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 4x1x1 //////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_4x1x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_4,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_4,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x4x1 //////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_2x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
//////////////////////////////// output: E5M2 /////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e5m2 = e5m2 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e5m2t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e5m2_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e5m2 = e4m3 * e5m2 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e5m2n_e5m2n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e5m2_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x2x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x2x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_2,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_2,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 1x4x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_1x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 4x1x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_4x1x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_4,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_4,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x4x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x4x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_persistent) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::KernelTmaWarpSpecializedPingpong
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x4x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_non_warpspecialized) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
// Hopper FP8 kernels with an auxiliary output tensor require CUDA 12.1 or newer; skip when building with CUDA 12.0
#if (!((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ == 0)))
///////////////////////////////////////////////////////////////////////////////
///////////////////////// output: E4M3 + Aux Tensor ///////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_e4m3) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
LayoutC, cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, cutlass::float_e4m3_t>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
#endif
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////// FP8 Accum /////////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_persistent_fp8_fast_accum) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_fp8_fast_accum) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::KernelTmaWarpSpecializedFP8FastAccum
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
////////////////////////// output: E4M3 + Bias ///////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
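// ScaledLinCombPerRowBiasEltAct computes, roughly, D = act(alpha * acc + beta * C + bias) with
// per-tensor scale factors folded in and a per-row bias (one value per output row, broadcast
// across N); in this test the bias is stored as bf16.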
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_bias_bf16) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, cutlass::bfloat16_t>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
////////////////////////// output: E4M3 + Bias + Relu ////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_bias_bf16_relu) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::ReLu, cutlass::float_e4m3_t, float, cutlass::bfloat16_t>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
// Hopper FP8 + Aux-tensor tests are compiled out for CUDA 12.0; they require CUDA 12.1 or newer.
#if (!((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ == 0)))
///////////////////////////////////////////////////////////////////////////////
///////////////////// output: E4M3 + Aux Tensor + Bias ///////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e5m2 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
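// ScaledLinCombPerRowBiasEltActAmaxAux extends the bias + activation fusion with an auxiliary
// output tensor and absolute-maximum (amax) reductions, the bookkeeping typically needed for
// FP8 scaling; the commented template arguments below spell out each element type.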
TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_f16_bias_f16) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
LayoutC, cutlass::epilogue::thread::Identity,
cutlass::float_e4m3_t, // ElementOutput
float, // ElementCompute
cutlass::half_t, // ElementAux
float, // ElementAmax
cutlass::half_t>; // ElementBias
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e5m2_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
////////////////// output: E4M3 + Aux Tensor + Bias + Relu ///////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e5m2 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_f16_relu) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
LayoutC, cutlass::epilogue::thread::ReLu,
cutlass::float_e4m3_t, // ElementOutput
float, // ElementCompute
cutlass::half_t, // ElementAux
float, // ElementAmax
float>; // ElementBias
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e5m2_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e5m2 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_f16_bias_f16_relu) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
LayoutC, cutlass::epilogue::thread::ReLu,
cutlass::float_e4m3_t, // ElementOutput
float, // ElementCompute
cutlass::half_t, // ElementAux
float, // ElementAmax
cutlass::half_t>; // ElementBias
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e5m2_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
#endif
///////////////////////////////////////////////////////////////////////////////
//////////////////////////////// TMA epilogue /////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
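// These two tests exercise the TmaWarpSpecialized epilogue schedule without a FusionOperation,
// i.e. the builder's default linear combination with the D store issued through TMA
// (rough description of the schedule's intent).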
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_tma_epilogue) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16,
cutlass::float_e4m3_t, LayoutC, 16,
cutlass::epilogue::TmaWarpSpecialized
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::KernelTmaWarpSpecializedPingpong
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3t_tensor_op_gmma_f32, 64x128x128_tma_epilogue) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16,
cutlass::float_e4m3_t, LayoutC, 16,
cutlass::epilogue::TmaWarpSpecialized
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::KernelTmaWarpSpecializedPingpong
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_f8_f8_f8_tensor_op_fp32.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_f8_f8_f8_tensor_op_fp32.cu",
"repo_id": "test",
"token_count": 20775
} | 64 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Testbed for device-wide grouped GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct TestbedGrouped {
//
// Type definitions
//
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp;
using ElementCompute = typename EpilogueOutputOp::ElementCompute;
using LayoutA = typename Gemm::LayoutA;
using LayoutB = typename Gemm::LayoutB;
using LayoutC = typename Gemm::LayoutC;
using MatrixCoord = typename LayoutC::TensorCoord;
//
// Data members
//
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint32_t seed;
int problem_count;
std::vector<cutlass::gemm::GemmCoord> problem_sizes_host;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device;
std::vector<int64_t> offset_A;
std::vector<int64_t> offset_B;
std::vector<int64_t> offset_C;
std::vector<int64_t> offset_D;
std::vector<int64_t> lda_host;
std::vector<int64_t> ldb_host;
std::vector<int64_t> ldc_host;
std::vector<int64_t> ldd_host;
cutlass::DeviceAllocation<int64_t> lda;
cutlass::DeviceAllocation<int64_t> ldb;
cutlass::DeviceAllocation<int64_t> ldc;
cutlass::DeviceAllocation<int64_t> ldd;
cutlass::DeviceAllocation<ElementA> block_A;
cutlass::DeviceAllocation<ElementB> block_B;
cutlass::DeviceAllocation<ElementC> block_C;
cutlass::DeviceAllocation<ElementC> block_D;
cutlass::DeviceAllocation<ElementA *> ptr_A;
cutlass::DeviceAllocation<ElementB *> ptr_B;
cutlass::DeviceAllocation<ElementC *> ptr_C;
cutlass::DeviceAllocation<ElementC *> ptr_D;
//
// Methods
//
TestbedGrouped(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope_max = 5;
scope_min = -5;
}
else {
scope_max = 8;
scope_min = -8;
}
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
      // no fill - leave the tensor zero-initialized
}
return true;
}
/// Initializes data structures
void initialize() {
//
// Choose random problem sizes
//
// construct a few problems of random sizes
srand(seed);
int64_t total_elements_A = 0;
int64_t total_elements_B = 0;
int64_t total_elements_C = 0;
int64_t total_elements_D = 0;
lda_host.resize(problem_count);
ldb_host.resize(problem_count);
ldc_host.resize(problem_count);
ldd_host.resize(problem_count);
problem_sizes_host.clear();
problem_sizes_host.resize(problem_count);
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem(
8 * (rand() % 64) + 24,
8 * (rand() % 64) + 24,
8 * (rand() % 64) + 24);
if (!i) {
problem = cutlass::gemm::GemmCoord(48, 16, 8);
}
problem_sizes_host.at(i) = problem;
// std::cout << "Problem[" << i << "]: " << problem << std::endl;
lda_host.at(i) = LayoutA::packed({problem.m(), problem.k()}).stride(0);
ldb_host.at(i) = LayoutB::packed({problem.k(), problem.n()}).stride(0);
ldc_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0);
ldd_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0);
offset_A.push_back(total_elements_A);
offset_B.push_back(total_elements_B);
offset_C.push_back(total_elements_C);
offset_D.push_back(total_elements_D);
int64_t elements_A = problem.m() * problem.k();
int64_t elements_B = problem.k() * problem.n();
int64_t elements_C = problem.m() * problem.n();
int64_t elements_D = problem.m() * problem.n();
total_elements_A += elements_A;
total_elements_B += elements_B;
total_elements_C += elements_C;
total_elements_D += elements_D;
// Random strides between problems?
}
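    // All groups are packed back-to-back into single device allocations (block_A/B/C/D);
    // the offsets recorded above locate each group's operands within those blocks.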
problem_sizes_device.reset(problem_count);
problem_sizes_device.copy_from_host(problem_sizes_host.data());
lda.reset(problem_count);
ldb.reset(problem_count);
ldc.reset(problem_count);
ldd.reset(problem_count);
lda.copy_from_host(lda_host.data());
ldb.copy_from_host(ldb_host.data());
ldc.copy_from_host(ldc_host.data());
ldd.copy_from_host(ldd_host.data());
//
// Assign pointers
//
block_A.reset(total_elements_A);
block_B.reset(total_elements_B);
block_C.reset(total_elements_C);
block_D.reset(total_elements_D);
std::vector<ElementA *> ptr_A_host(problem_count);
std::vector<ElementB *> ptr_B_host(problem_count);
std::vector<ElementC *> ptr_C_host(problem_count);
std::vector<ElementC *> ptr_D_host(problem_count);
for (int32_t i = 0; i < problem_count; ++i) {
ptr_A_host.at(i) = block_A.get() + offset_A.at(i);
ptr_B_host.at(i) = block_B.get() + offset_B.at(i);
ptr_C_host.at(i) = block_C.get() + offset_C.at(i);
ptr_D_host.at(i) = block_D.get() + offset_D.at(i);
}
ptr_A.reset(problem_count);
ptr_A.copy_from_host(ptr_A_host.data());
ptr_B.reset(problem_count);
ptr_B.copy_from_host(ptr_B_host.data());
ptr_C.reset(problem_count);
ptr_C.copy_from_host(ptr_C_host.data());
ptr_D.reset(problem_count);
ptr_D.copy_from_host(ptr_D_host.data());
//
// Initialize the problems of the workspace
//
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i);
LayoutA layout_A(lda_host.at(i));
LayoutB layout_B(ldb_host.at(i));
LayoutC layout_C(ldc_host.at(i));
LayoutC layout_D(ldd_host.at(i));
MatrixCoord extent_A{problem.m(), problem.k()};
MatrixCoord extent_B{problem.k(), problem.n()};
MatrixCoord extent_C{problem.m(), problem.n()};
std::vector<ElementA> matrix_A(layout_A.capacity(extent_A));
std::vector<ElementB> matrix_B(layout_B.capacity(extent_B));
std::vector<ElementC> matrix_C(layout_C.capacity(extent_C));
std::vector<ElementC> matrix_D(layout_D.capacity(extent_C));
initialize_tensor(cutlass::TensorView<ElementA, LayoutA>(matrix_A.data(), layout_A, extent_A), init_A, seed * 2021);
initialize_tensor(cutlass::TensorView<ElementB, LayoutB>(matrix_B.data(), layout_B, extent_B), init_B, seed * 2022);
initialize_tensor(cutlass::TensorView<ElementC, LayoutC>(matrix_C.data(), layout_C, extent_C), init_C, seed * 2023);
cutlass::device_memory::copy_to_device(ptr_A_host.at(i), matrix_A.data(), matrix_A.size());
cutlass::device_memory::copy_to_device(ptr_B_host.at(i), matrix_B.data(), matrix_B.size());
cutlass::device_memory::copy_to_device(ptr_C_host.at(i), matrix_C.data(), matrix_C.size());
cutlass::device_memory::copy_to_device(ptr_D_host.at(i), matrix_D.data(), matrix_D.size());
}
}
  /// Verifies the computed result against a host reference GEMM
bool verify(
ElementCompute alpha,
ElementCompute beta) {
bool passed = true;
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i);
LayoutA layout_A(lda_host.at(i));
LayoutB layout_B(ldb_host.at(i));
LayoutC layout_C(ldc_host.at(i));
LayoutC layout_D(ldd_host.at(i));
MatrixCoord extent_A{problem.m(), problem.k()};
MatrixCoord extent_B{problem.k(), problem.n()};
MatrixCoord extent_C{problem.m(), problem.n()};
std::vector<ElementA> matrix_A(layout_A.capacity(extent_A));
std::vector<ElementB> matrix_B(layout_B.capacity(extent_B));
std::vector<ElementC> matrix_C(layout_C.capacity(extent_C));
std::vector<ElementC> matrix_D(layout_D.capacity(extent_C));
std::vector<ElementC> matrix_Ref(layout_D.capacity(extent_C));
cutlass::device_memory::copy_to_host(matrix_A.data(), block_A.get() + offset_A.at(i), matrix_A.size());
cutlass::device_memory::copy_to_host(matrix_B.data(), block_B.get() + offset_B.at(i), matrix_B.size());
cutlass::device_memory::copy_to_host(matrix_C.data(), block_C.get() + offset_C.at(i), matrix_C.size());
cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size());
cutlass::TensorView<ElementA, LayoutA> view_A(matrix_A.data(), layout_A, extent_A);
cutlass::TensorView<ElementB, LayoutB> view_B(matrix_B.data(), layout_B, extent_B);
cutlass::TensorView<ElementC, LayoutC> view_C(matrix_C.data(), layout_C, extent_C);
cutlass::TensorView<ElementC, LayoutC> view_D(matrix_D.data(), layout_D, extent_C);
cutlass::TensorView<ElementC, LayoutC> view_Ref(matrix_Ref.data(), layout_D, extent_C);
// Reference GEMM
cutlass::reference::host::GemmComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute, ElementAccumulator
>(
problem,
alpha,
view_A,
Gemm::kTransformA,
view_B,
Gemm::kTransformB,
beta,
view_C,
view_Ref,
ElementAccumulator(0)
);
// Ensure that no input or output is entirely zero
EXPECT_GT(cutlass::reference::host::TensorNorm(view_A), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(view_B), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(view_C), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(view_D), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(view_Ref), 0);
// Compare against reference
passed = cutlass::reference::host::TensorEquals(view_D, view_Ref);
if (!passed) {
std::ofstream file("testbed_grouped_errors.txt");
file
<< "problem: " << problem << " [group: " << i << "]\n"
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << view_A
<< "\nB =\n" << view_B
<< "\nC =\n" << view_C
<< "\n\nReference =\n" << view_Ref
<< "\nComputed =\n" << view_D;
return passed;
}
}
return passed;
}
/// Executes one test
bool run(
int problem_count,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
this->problem_count = problem_count;
// Initialize the problem
initialize();
int threadblock_count = Gemm::sufficient(problem_sizes_host.data(), problem_count);
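    // Gemm::sufficient() reports how many threadblocks can be used for these problem sizes;
    // a count of zero indicates insufficient device resources and the test is waived below.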
// Early exit
if (!threadblock_count) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device resources." << std::endl;
}
return true;
}
// Configure the GEMM arguments
typename EpilogueOutputOp::Params epilogue_op(alpha, beta);
// Configure GEMM arguments
typename Gemm::Arguments args(
problem_sizes_device.get(),
problem_count,
threadblock_count,
epilogue_op,
ptr_A.get(),
ptr_B.get(),
ptr_C.get(),
ptr_D.get(),
lda.get(),
ldb.get(),
ldc.get(),
ldd.get(),
problem_sizes_host.data()
);
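    // The grouped GEMM consumes device-side arrays of per-group pointers and leading
    // dimensions (ptr_* / ld* above); the final argument additionally supplies the host
    // copy of the problem sizes.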
// Initialize the GEMM object
Gemm gemm;
size_t workspace_size = gemm.get_workspace_size(args);
cutlass::DeviceAllocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm.initialize(args, workspace.get());
if (status != cutlass::Status::kSuccess) {
return false;
}
// Run the GEMM object
status = gemm.run();
if (status != cutlass::Status::kSuccess) {
return false;
}
// Wait for completion
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< "Kernel execution error: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
// Verify correctness
return verify(alpha, beta);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // device
} // gemm
} // test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_grouped.h/0 | {
"file_path": "test/unit/gemm/device/testbed_grouped.h",
"repo_id": "test",
"token_count": 6616
} | 65 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for threadblock-level GEMM
*/
#include "mma_multistage_testbed.h"
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
////////////////////////////////////////////////////////////////////////////////
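// "congruous" in these test names refers to the A column-major / B row-major operand
// arrangement. Each test instantiates DefaultMmaCore for a threadblock/warp/instruction shape
// and stage count and runs the resulting multistage (cp.async-pipelined) MMA through the
// shared Testbed defined in mma_multistage_testbed.h.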
TEST(SM80_gemm_threadblock_congruous, tensor_op_16x128x64_16x32x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(32, 256, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC,
cutlass::arch::OpClassTensorOp, Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous, tensor_op_128x16x64_32x16x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 32, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 16, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC,
cutlass::arch::OpClassTensorOp, Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous, tensor_op_32x128x32_32x32x32_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 256, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC,
cutlass::arch::OpClassTensorOp, Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous, tensor_op_128x32x32_32x32x32_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC,
cutlass::arch::OpClassTensorOp, Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x64x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x64x64_64x32x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x128x64_32x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_256x256x384_128x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_512x256x384_256x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
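// The 4-stage variants below shrink the K tile from 64 to 32, trading K-tile depth for a
// deeper software pipeline.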
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x64x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x64x32_64x32x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x128x32_32x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_256x256x384_128x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_512x256x768_256x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
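// The remaining congruous tests switch the operands to tf32 with the 16x8x8 tensor core
// instruction; accumulation stays in fp32 and the threadblock/warp shapes largely mirror the
// f16 cases above.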
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x64x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x64x32_64x32x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x128x32_32x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_256x256x192_128x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_512x256x384_256x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
  cutlass::gemm::GemmCoord problem_size(512, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x64x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x64x16_64x32x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x128x16_32x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_256x256x192_128x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_512x256x384_256x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
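// Crosswise layout (A row-major, B column-major, so K is the contiguous
// dimension of both operands), half_t operands, 16x8x16 instruction, 3 stages.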
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x64_32x32x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x64_64x32x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x64_32x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x384_128x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x768_256x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
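// Crosswise layout, half_t operands, 16x8x16 instruction, 4-stage pipeline.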
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x32_32x32x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x32_64x32x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x32_32x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x384_128x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x768_256x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
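// Crosswise layout, tfloat32_t operands, 16x8x8 instruction, 3-stage pipeline.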
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x32_32x32x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x32_64x32x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x32_32x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x192_128x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x192_256x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
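// Crosswise layout, tfloat32_t operands, 16x8x8 instruction, 4-stage pipeline.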
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x16_32x32x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x16_64x32x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x16_32x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x192_128x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x192_256x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
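// Crosswise layout, int8_t operands accumulated in int32, 16x8x32 instruction,
// 3-stage pipeline.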
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x128_64x64x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x128_32x32x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x128_64x32x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x128_32x64x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x128_64x64x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x768_128x128x128_64x64x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x768_256x128x128_64x64x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
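// Crosswise layout, int8_t operands accumulated in int32, 16x8x32 instruction,
// 4-stage pipeline.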
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x64_64x64x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x64_32x32x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x64_64x32x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x64_32x64x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x64_64x64x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x768_128x128x64_64x64x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x768_256x128x64_64x64x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
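// Crosswise layout, int4b_t operands accumulated in int32, 16x8x64 instruction,
// 3-stage pipeline.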
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x256_64x64x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x256_32x32x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x256_64x32x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 256>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
     tensor_op_64x128x256_32x64x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 256>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
     tensor_op_128x128x256_64x64x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x1536_128x128x256_64x64x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 1536);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x1536_256x128x256_64x64x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 1536);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 256>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x128_64x64x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x128_32x32x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x128_64x32x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x128_32x64x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x128_64x64x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x1536_128x128x128_64x64x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 1536);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x1536_256x128x128_64x64x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 1536);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
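// The tests below switch to 1-bit (uint1b_t) operands with the 16x8x256 binary Tensor Core
// instruction and int32 accumulation.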
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x1024_64x64x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x1024_32x32x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x1024_64x32x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 1024>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x1024_32x64x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 1024>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x1024_64x64x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 1024>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x6144_128x128x1024_64x64x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 6144);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 1024>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x6144_256x128x1024_64x64x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 6144);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 1024>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x512_64x64x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x512_32x32x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 512>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x512_64x32x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x512_32x64x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 512>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x512_64x64x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x6144_128x128x512_64x64x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 6144);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x6144_256x128x512_64x64x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 6144);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
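// Double-precision tests: column-major A and row-major B feeding the 8x8x4 FP64 Tensor Core
// instruction.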
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x64x16_32x64x16_8x8x4_3stage) {
using ElementA = double;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = double;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 16);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 2, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x128x16_32x64x16_8x8x4_3stage) {
using ElementA = double;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = double;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 64);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
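// Interleaved-layout tests: int8_t operands in ColumnMajorInterleaved<32> /
// RowMajorInterleaved<32> with the 16x8x32 integer Tensor Core instruction.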
TEST(SM80_gemm_threadblock_interleaved,
tensor_op_64x128x64_32x64x64_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
using ElementB = int8_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
tensor_op_128x128x64_64x64x64_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
using ElementB = int8_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
multicta_256x256x384_128x128x64_64x64x64_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
using ElementB = int8_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
multicta_512x256x384_256x128x64_64x64x64_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
using ElementB = int8_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
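// Interleaved-layout tests with int4b_t operands in ColumnMajorInterleaved<64> /
// RowMajorInterleaved<64> and the 16x8x64 instruction.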
TEST(SM80_gemm_threadblock_interleaved,
tensor_op_64x128x128_32x64x128_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
tensor_op_128x128x128_64x64x128_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
multicta_256x256x768_128x128x128_64x64x128_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
multicta_512x256x1536_256x128x128_64x64x128_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 1536);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
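// Double-precision tests with row-major A and column-major B (8x8x4 instruction), covering
// 4-stage and 3-stage pipelines.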
TEST(SM80_gemm_threadblock_crosswise_f64,
tensor_op_32x32x16_16x16x16_8x8x4_4stage) {
using ElementA = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = double;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(32, 32, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
TEST(SM80_gemm_threadblock_crosswise_f64,
tensor_op_64x64x16_32x32x16_8x8x4_4stage) {
using ElementA = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = double;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
TEST(SM80_gemm_threadblock_crosswise_f64,
tensor_op_64x128x16_32x64x16_8x8x4_4stage) {
using ElementA = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = double;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
TEST(SM80_gemm_threadblock_crosswise_f64,
tensor_op_128x64x16_64x32x16_8x8x4_4stage) {
using ElementA = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = double;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
TEST(SM80_gemm_threadblock_crosswise_f64,
tensor_op_128x128x16_32x64x16_8x8x4_3stage) {
using ElementA = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = double;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
#endif
| test/unit/gemm/threadblock/mma_multistage.cu/0 | {
"file_path": "test/unit/gemm/threadblock/mma_multistage.cu",
"repo_id": "test",
"token_count": 53626
} | 66 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit testbed for kernel-level GEMM
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/platform/platform.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/vector.h"
#include "cutlass/numeric_types.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor_planar_complex.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/gemm_planar_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Mma>
__global__ void kernel_mma_planar_complex(
cutlass::gemm::GemmCoord problem_size,
typename Mma::IteratorA::Params params_A,
typename Mma::IteratorA::Element *ptr_A,
int64_t imaginary_stride_A,
typename Mma::IteratorB::Params params_B,
typename Mma::IteratorB::Element *ptr_B,
int64_t imaginary_stride_B,
typename Mma::ElementC *ptr_C,
typename Mma::LayoutC::Stride::Index ldc, int64_t imaginary_stride_C) {
// Shared storage needed by threadblock-scoped matrix multiply-accumulate
__shared__ typename Mma::SharedStorage shared_storage;
// Compute threadblock location
cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y),
0};
cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k()};
cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(),
tb_tile_offset.n() * Mma::Shape::kN};
// Compute position within threadblock
int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// Construct iterators to A operand
typename Mma::IteratorA iterator_A_real(params_A, ptr_A,
{problem_size.m(), problem_size.k()},
tb_thread_id, tb_offset_A);
typename Mma::IteratorA iterator_A_imag(params_A, ptr_A + imaginary_stride_A,
{problem_size.m(), problem_size.k()},
tb_thread_id, tb_offset_A);
// Construct iterators to B operand
typename Mma::IteratorB iterator_B_real(params_B, ptr_B,
{problem_size.k(), problem_size.n()},
tb_thread_id, tb_offset_B);
typename Mma::IteratorB iterator_B_imag(params_B, ptr_B + imaginary_stride_B,
{problem_size.k(), problem_size.n()},
tb_thread_id, tb_offset_B);
int warp_id = threadIdx.y;
int lane_id = threadIdx.x;
// Construct thread-scoped matrix multiply
Mma mma(shared_storage, tb_thread_id, warp_id, lane_id);
typename Mma::FragmentC accum;
accum.clear();
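// Number of threadblock-scoped K iterations: ceiling division of the GEMM K extent by the
// threadblock tile's K extent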
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A_real, iterator_A_imag, iterator_B_real, iterator_B_imag, accum);
// Output results
typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, lane_id);
iterator_C.add_tile_offset(
{(tb_tile_offset.m() * Mma::WarpCount::kM) +
(warp_id % Mma::WarpCount::kM),
(tb_tile_offset.n() * Mma::WarpCount::kN) +
(warp_id / Mma::WarpCount::kM)});
iterator_C.store(accum.real);
iterator_C.store_with_pointer_offset(accum.imag, imaginary_stride_C);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Threadblock-level matrix multiply-accumulate
typename Mma_>
struct TestbedPlanarComplex {
using Mma = Mma_;
using ThreadblockShape = typename Mma::Shape;
using IteratorA = typename Mma::IteratorA;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using IteratorB = typename Mma::IteratorB;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Mma::ElementC;
using ElementAccumulator = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
using ThreadMapA = typename Mma::IteratorA::ThreadMap;
using ThreadMapB = typename Mma::IteratorB::ThreadMap;
using AccessTypeA = cutlass::Array<ElementA, ThreadMapA::kElementsPerAccess>;
using AccessTypeB = cutlass::Array<ElementB, ThreadMapB::kElementsPerAccess>;
static int const Stages = Mma::kStages;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
Mma::kCacheOpA;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
Mma::kCacheOpB;
//
// Data members
//
cutlass::HostTensorPlanarComplex<ElementA, LayoutA> matrix_A;
cutlass::HostTensorPlanarComplex<ElementB, LayoutB> matrix_B;
cutlass::HostTensorPlanarComplex<ElementC, LayoutC> matrix_C_computed;
cutlass::HostTensorPlanarComplex<ElementC, LayoutC> matrix_C_reference;
cutlass::gemm::GemmCoord problem_size;
//
// Methods
//
/// Allocates workspace in device memory
TestbedPlanarComplex(int m, int n, int k)
: problem_size(m, n, k) {
matrix_A.reset(cutlass::make_Coord(m, k));
matrix_B.reset(cutlass::make_Coord(k, n));
matrix_C_computed.reset(cutlass::make_Coord(m, n));
matrix_C_reference.reset(cutlass::make_Coord(m, n), false);
}
/// Runs the test
bool run(
dim3 grid, dim3 block,
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
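// Note: this branch hard-codes cutlass::half_t values and therefore assumes ElementA is
// constructible from half_t.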
for (int i = 0; i < matrix_A.capacity() * 2; ++i) {
matrix_A.host_data()[i] = cutlass::half_t(float(i % 5) - 2);
}
/*
cutlass::reference::host::BlockFillSequential(matrix_A.host_data(),
matrix_A.capacity() * 2);
*/
} else if (init_A == cutlass::Distribution::Identity) {
//cutlass::reference::host::TensorFillIdentity(matrix_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_B.host_data(),
matrix_B.capacity() * 2);
for (int i = 0; i < matrix_B.capacity() * 2; ++i) {
matrix_B.host_data()[i] = cutlass::half_t(float((i + 3) % 5) - 2);
}
} else if (init_B == cutlass::Distribution::Identity) {
//cutlass::reference::host::TensorFillIdentity(matrix_B.host_view());
} else {
return false;
}
matrix_A.sync_device();
matrix_B.sync_device();
matrix_C_computed.sync_device();
typename IteratorA::Params params_A(matrix_A.layout());
typename IteratorB::Params params_B(matrix_B.layout());
test::gemm::threadblock::kernel_mma_planar_complex<Mma><<<grid, block>>>(
problem_size,
params_A,
matrix_A.device_data(),
matrix_A.imaginary_stride(),
params_B,
matrix_B.device_data(),
matrix_B.imaginary_stride(),
matrix_C_computed.device_data(),
matrix_C_computed.layout().stride(0),
matrix_C_computed.imaginary_stride()
);
//
// Check error code
//
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< " kernel error: " << cudaGetErrorString(result);
matrix_C_computed.sync_host();
cutlass::reference::host::GemmPlanarComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator
>(
problem_size,
cutlass::complex<ElementAccumulator>(ElementAccumulator(1)),
matrix_A.host_ref(),
Mma::kTransformA,
matrix_B.host_ref(),
Mma::kTransformB,
cutlass::complex<ElementAccumulator>(ElementAccumulator(0)),
matrix_C_reference.host_ref(),
matrix_C_reference.host_ref()
);
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed.host_view(),
matrix_C_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("mma_pipelined_testbed_errors.txt");
output
<< "A:\n" << matrix_A.host_view() << "\n"
<< "B:\n" << matrix_B.host_view() << "\n"
<< "Reference:\n"
<< matrix_C_reference.host_view() << "\n"
<< "Computed:\n"
<< matrix_C_computed.host_view() << "\n";
}
return passed;
}
};
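//
// Illustrative usage sketch (an assumption, not part of the testbed itself): a unit test
// would pick a concrete threadblock-scoped planar-complex Mma type -- written below as the
// placeholder `Mma` -- and drive the testbed roughly as follows. The problem size and the
// single-CTA grid are arbitrary example values; blockDim.y equals the number of warps in
// the threadblock tile.
//
//   test::gemm::threadblock::TestbedPlanarComplex<Mma> testbed(64, 64, 32);
//
//   dim3 grid(1, 1);
//   dim3 block(32, Mma::WarpCount::kM * Mma::WarpCount::kN, 1);
//
//   EXPECT_TRUE(testbed.run(grid, block));
//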
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
| test/unit/gemm/threadblock/mma_planar_complex_testbed.h/0 | {
"file_path": "test/unit/gemm/threadblock/mma_planar_complex_testbed.h",
"repo_id": "test",
"token_count": 4837
} | 67 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_types.h"
#include "cutlass/subbyte_reference.h"
#include "cutlass/platform/platform.h"
#include "cutlass/arch/arch.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
namespace test {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test kernel
template <typename Mma, typename ThreadblockShape>
__global__ void kernel(
typename Mma::ElementC *output_C,
typename Mma::ElementA const *input_A,
typename Mma::ElementB const *input_B,
typename Mma::ElementC const *input_C,
int iterations = 1) {
// Use AlignedBuffer to store trivially copyable objects in unions and __shared__ buffers.
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementA, ThreadblockShape::kM * ThreadblockShape::kK> smem_buffer_A;
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementB, ThreadblockShape::kN * ThreadblockShape::kK> smem_buffer_B;
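// A single thread stages the A and B operands from global memory into shared memory
// element by element; this favors simplicity over bandwidth since the kernel only
// exercises warp-level MMA correctness.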
if (threadIdx.x == 0) {
typename Mma::ElementA *smem_ptr_A = smem_buffer_A.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_A.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementA>::get(smem_ptr_A, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementA>::type>::get(input_A, i);
}
typename Mma::ElementB *smem_ptr_B = smem_buffer_B.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_B.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementB>::get(smem_ptr_B, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementB>::type>::get(input_B, i);
}
}
__syncthreads();
//
// Construct warp-level matrix product
//
using FragmentA = typename Mma::FragmentA;
using FragmentB = typename Mma::FragmentB;
using FragmentC = typename Mma::FragmentC;
typename Mma::LayoutA layout_A = Mma::LayoutA::packed({ThreadblockShape::kM, ThreadblockShape::kK});
typename Mma::LayoutB layout_B = Mma::LayoutB::packed({ThreadblockShape::kK, ThreadblockShape::kN});
typename Mma::LayoutC layout_C = Mma::LayoutC::packed({Mma::Shape::kM, Mma::Shape::kN});
typename Mma::IteratorA iter_A({smem_buffer_A.data(), layout_A}, cutlass::arch::LaneId());
typename Mma::IteratorB iter_B({smem_buffer_B.data(), layout_B}, cutlass::arch::LaneId());
FragmentA frag_A;
FragmentB frag_B;
FragmentC accum;
Mma mma;
accum.clear();
CUTLASS_PRAGMA_NO_UNROLL
for (int iter = 0; iter < iterations; ++iter) { // place in loop that is not unrolled
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < ThreadblockShape::kK;
k += Mma::Policy::MmaShape::kK) {
iter_A.load(frag_A);
iter_B.load(frag_B);
++iter_A;
++iter_B;
mma(accum, frag_A, frag_B, accum);
}
}
typename Mma::IteratorC iter_C({output_C, layout_C}, cutlass::arch::LaneId());
iter_C.store(accum);
}
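// Note: this kernel is written for a single warp per threadblock -- the testbeds below
// launch it with dim3(32, 1, 1) -- and each lane builds its iterators from
// cutlass::arch::LaneId(). The "#pragma unroll 1" staging loops copy operands into
// shared memory one element at a time through cutlass::ReferenceFactory so that
// sub-byte element types are handled as well.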
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Warp-level matrix multiply-accumulate
typename Mma_,
/// Size of threadblock-scoped shape used to store SMEM
typename ThreadblockShape_,
/// The inner product operation performed by GEMM
typename Operator_ = cutlass::arch::OpMultiplyAdd
>
struct Testbed {
  /// Warp-level matrix multiply-accumulate operator
using Mma = Mma_;
using ThreadblockShape = ThreadblockShape_;
using Operator = Operator_;
using Shape = typename Mma::Shape;
using ElementA = typename Mma::ElementA;
using LayoutA = typename Mma::LayoutA;
using ElementB = typename Mma::ElementB;
using LayoutB = typename Mma::LayoutB;
using ElementC = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
Testbed() {
tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK));
tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.major == 9) {
// NVIDIA Hopper drops support for several data types
if (
cutlass::sizeof_bits<ElementA>::value < 8 ||
cutlass::sizeof_bits<ElementB>::value < 8 ||
cutlass::sizeof_bits<ElementC>::value < 8) {
return false;
}
}
return true;
}
/// Runs the test
bool run(
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::BlockFillRandomUniform(tensor_A.host_data(),
tensor_A.capacity(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_A.host_data(),
tensor_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::BlockFillRandomUniform(tensor_B.host_data(),
tensor_B.capacity(), seed, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_B.host_data(),
tensor_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
// launch kernel
kernel<Mma, ThreadblockShape><<< dim3(1, 1), dim3(32, 1, 1) >>>(
tensor_D_computed.device_data(),
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data());
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC,
Operator>
reference_gemm;
reference_gemm(
{Shape::kM, Shape::kN, ThreadblockShape::kK},
ElementC(1),
tensor_A.host_ref(),
tensor_B.host_ref(),
ElementC(0),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
cutlass::TensorView<ElementA, cutlass::layout::ColumnMajor> tensor_A_physical(
tensor_A.host_data(),
tensor_A.stride()[0],
tensor_A.extent());
cutlass::TensorView<ElementB, cutlass::layout::RowMajor> tensor_B_physical(
tensor_B.host_data(),
tensor_B.stride()[0],
tensor_B.extent());
std::cout <<"cutlass::sizeof_bits<ElementA>::value = "<<cutlass::sizeof_bits<ElementA>::value<<"\n";
std::cout
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "A(physical - stride: " << tensor_A.stride()[0]
<< ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementB>::value<<"\n";
std::cout
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "B(physical - stride: " << tensor_B.stride()[0]
<< ", extent: " << tensor_B.extent() << "):\n" << tensor_B_physical << "\n\n";
std::cout
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
}
return passed;
}
};
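// Illustrative usage sketch (not part of the test suite). The Mma and shape aliases
// below are hypothetical placeholders; concrete warp-level MMA instantiations are
// defined in the per-architecture test translation units that include this header:
//
//   // using Mma = ...;            // a cutlass::gemm::warp:: warp-level MMA type
//   // using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
//   TEST(Example_warp_gemm_testbed, 64x64x32) {
//     EXPECT_TRUE((test::gemm::warp::Testbed<Mma, ThreadblockShape>().run()));
//   }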
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Warp-level matrix multiply-accumulate
typename Mma_,
/// Size of threadblock-scoped shape used to store SMEM
typename ThreadblockShape_
>
struct TestbedComplex {
  /// Warp-level matrix multiply-accumulate operator
using Mma = Mma_;
using ThreadblockShape = ThreadblockShape_;
using Shape = typename Mma::Shape;
using ElementA = typename Mma::ElementA;
using LayoutA = typename Mma::LayoutA;
using ElementB = typename Mma::ElementB;
using LayoutB = typename Mma::LayoutB;
using ElementC = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
TestbedComplex() {
tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK));
tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.major == 9) {
// NVIDIA Hopper drops support for several data types
if (
cutlass::sizeof_bits<ElementA>::value < 8 ||
cutlass::sizeof_bits<ElementB>::value < 8 ||
cutlass::sizeof_bits<ElementC>::value < 8) {
return false;
}
}
return true;
}
/// Runs the test
bool run(
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(tensor_A.host_view(),
seed, 8, -8, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_A.host_data(),
tensor_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(tensor_B.host_view(),
seed + 16, 8, -8, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_B.host_data(),
tensor_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
// launch kernel
kernel<Mma, ThreadblockShape><<< dim3(1, 1), dim3(32, 1, 1) >>>(
tensor_D_computed.device_data(),
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data());
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
cutlass::reference::host::GemmComplex(
{Shape::kM, Shape::kN, ThreadblockShape::kK},
ElementC(1),
tensor_A.host_ref(),
Mma::kTransformA,
tensor_B.host_ref(),
Mma::kTransformB,
ElementC(0),
tensor_C.host_ref(),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
cutlass::TensorView<ElementA, cutlass::layout::ColumnMajor> tensor_A_physical(
tensor_A.host_data(),
tensor_A.stride()[0],
tensor_A.extent());
cutlass::TensorView<ElementB, cutlass::layout::RowMajor> tensor_B_physical(
tensor_B.host_data(),
tensor_B.stride()[0],
tensor_B.extent());
std::cout <<"cutlass::sizeof_bits<ElementA>::value = "<<cutlass::sizeof_bits<ElementA>::value<<"\n";
std::cout
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "A(physical - stride: " << tensor_A.stride()[0] << ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementB>::value<<"\n";
std::cout
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "B(physical - stride: " << tensor_B.stride()[0] << ", extent: " << tensor_B.extent() <<"):\n" << tensor_B_physical << "\n\n";
std::cout
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test kernel
template <typename Mma, typename ThreadblockShape>
__global__ void kernel_transform(
typename Mma::ElementC *output_C,
typename Mma::ElementA const *input_A,
typename Mma::ElementB const *input_B,
typename Mma::ElementC const *input_C,
int iterations = 1) {
// Use AlignedBuffer to store trivially copyable objects in unions and __shared__ buffers.
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementA, ThreadblockShape::kM * ThreadblockShape::kK> smem_buffer_A;
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementB, ThreadblockShape::kN * ThreadblockShape::kK> smem_buffer_B;
if (threadIdx.x == 0) {
typename Mma::ElementA *smem_ptr_A = smem_buffer_A.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_A.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementA>::get(smem_ptr_A, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementA>::type>::get(input_A, i);
}
typename Mma::ElementB *smem_ptr_B = smem_buffer_B.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_B.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementB>::get(smem_ptr_B, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementB>::type>::get(input_B, i);
}
}
__syncthreads();
//
// Construct warp-level matrix product
//
using FragmentA = typename Mma::FragmentA;
using FragmentB = typename Mma::FragmentB;
using FragmentC = typename Mma::FragmentC;
using TransformedFragmentA = typename Mma::TransformedFragmentA;
using TransformedFragmentB = typename Mma::TransformedFragmentB;
typename Mma::LayoutA layout_A = Mma::LayoutA::packed({ThreadblockShape::kM, ThreadblockShape::kK});
typename Mma::LayoutB layout_B = Mma::LayoutB::packed({ThreadblockShape::kK, ThreadblockShape::kN});
typename Mma::LayoutC layout_C = Mma::LayoutC::packed({Mma::Shape::kM, Mma::Shape::kN});
typename Mma::IteratorA iter_A({smem_buffer_A.data(), layout_A}, cutlass::arch::LaneId());
typename Mma::IteratorB iter_B({smem_buffer_B.data(), layout_B}, cutlass::arch::LaneId());
FragmentA loaded_frag_A;
FragmentB loaded_frag_B;
TransformedFragmentA transformed_frag_A;
TransformedFragmentB transformed_frag_B;
FragmentC accum;
Mma mma;
accum.clear();
CUTLASS_PRAGMA_NO_UNROLL
for (int iter = 0; iter < iterations; ++iter) { // place in loop that is not unrolled
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < ThreadblockShape::kK;
k += Mma::Policy::MmaShape::kK) {
iter_A.load(loaded_frag_A);
iter_B.load(loaded_frag_B);
++iter_A;
++iter_B;
mma.transform(transformed_frag_A, transformed_frag_B, loaded_frag_A,
loaded_frag_B);
mma(accum, transformed_frag_A, transformed_frag_B, accum);
}
}
typename Mma::IteratorC iter_C({output_C, layout_C}, cutlass::arch::LaneId());
iter_C.store(accum);
}
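// Note: kernel_transform differs from the kernel above only in the explicit
// mma.transform(...) step, which converts the fragments as loaded from shared memory
// (FragmentA / FragmentB) into the representation consumed by the MMA instruction
// (TransformedFragmentA / TransformedFragmentB), e.g. when the storage and compute
// element types differ.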
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Warp-level matrix multiply-accumulate
typename Mma_,
/// Size of threadblock-scoped shape used to store SMEM
typename ThreadblockShape_,
  /// The inner product operation performed by GEMM
typename Operator_ = cutlass::arch::OpMultiplyAdd
>
struct TransformTestbed {
  /// Warp-level matrix multiply-accumulate operator
using Mma = Mma_;
using ThreadblockShape = ThreadblockShape_;
using Operator = Operator_;
using Shape = typename Mma::Shape;
using ElementA = typename Mma::ElementA;
using LayoutA = typename Mma::LayoutA;
using ElementB = typename Mma::ElementB;
using LayoutB = typename Mma::LayoutB;
using ElementC = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
TransformTestbed() {
tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK));
tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.major == 9) {
// NVIDIA Hopper drops support for several data types
if (
cutlass::sizeof_bits<ElementA>::value < 8 ||
cutlass::sizeof_bits<ElementB>::value < 8 ||
cutlass::sizeof_bits<ElementC>::value < 8) {
return false;
}
}
return true;
}
/// Runs the test
bool run(
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
tensor_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_A.host_data(),
tensor_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
tensor_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_B.host_data(),
tensor_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
// launch kernel
kernel_transform<Mma, ThreadblockShape><<<dim3(1, 1), dim3(32, 1, 1)>>>(
tensor_D_computed.device_data(), tensor_A.device_data(),
tensor_B.device_data(), tensor_C.device_data());
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC,
Operator>
reference_gemm;
reference_gemm(
{Shape::kM, Shape::kN, ThreadblockShape::kK},
ElementC(1),
tensor_A.host_ref(),
tensor_B.host_ref(),
ElementC(0),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
cutlass::TensorView<ElementA, cutlass::layout::ColumnMajor> tensor_A_physical(
tensor_A.host_data(),
tensor_A.stride()[0],
tensor_A.extent());
cutlass::TensorView<ElementB, cutlass::layout::RowMajor> tensor_B_physical(
tensor_B.host_data(),
tensor_B.stride()[0],
tensor_B.extent());
std::cout <<"cutlass::sizeof_bits<ElementA>::value = "<<cutlass::sizeof_bits<ElementA>::value<<"\n";
std::cout
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "A(physical - stride: " << tensor_A.stride()[0] << ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementB>::value<<"\n";
std::cout
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "B(physical - stride: " << tensor_B.stride()[0] << ", extent: " << tensor_B.extent() << "):\n" << tensor_B_physical << "\n\n";
std::cout
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Warp-level matrix multiply-accumulate
typename Mma_,
/// Size of threadblock-scoped shape used to store SMEM
typename ThreadblockShape_
>
struct TransformedTestbedComplex {
  /// Warp-level matrix multiply-accumulate operator
using Mma = Mma_;
using ThreadblockShape = ThreadblockShape_;
using Shape = typename Mma::Shape;
using ElementA = typename Mma::ElementA;
using LayoutA = typename Mma::LayoutA;
using ElementB = typename Mma::ElementB;
using LayoutB = typename Mma::LayoutB;
using ElementC = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
TransformedTestbedComplex() {
tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK));
tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.major == 9) {
// NVIDIA Hopper drops support for several data types
if (
cutlass::sizeof_bits<ElementA>::value < 8 ||
cutlass::sizeof_bits<ElementB>::value < 8 ||
cutlass::sizeof_bits<ElementC>::value < 8) {
return false;
}
}
return true;
}
/// Runs the test
bool run(
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(tensor_A.host_view(),
seed, 8, -8, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_A.host_data(),
tensor_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(tensor_B.host_view(),
seed + 16, 8, -8, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_B.host_data(),
tensor_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
// launch kernel
kernel_transform<Mma, ThreadblockShape><<< dim3(1, 1), dim3(32, 1, 1) >>>(
tensor_D_computed.device_data(),
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data());
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
cutlass::reference::host::GemmComplex(
{Shape::kM, Shape::kN, ThreadblockShape::kK},
ElementC(1),
tensor_A.host_ref(),
Mma::kTransformA,
tensor_B.host_ref(),
Mma::kTransformB,
ElementC(0),
tensor_C.host_ref(),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
cutlass::TensorView<ElementA, cutlass::layout::ColumnMajor> tensor_A_physical(
tensor_A.host_data(),
tensor_A.stride()[0],
tensor_A.extent());
cutlass::TensorView<ElementB, cutlass::layout::RowMajor> tensor_B_physical(
tensor_B.host_data(),
tensor_B.stride()[0],
tensor_B.extent());
std::cout <<"cutlass::sizeof_bits<ElementA>::value = "<<cutlass::sizeof_bits<ElementA>::value<<"\n";
std::cout
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "A(physical - stride: " << tensor_A.stride()[0] << ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementB>::value<<"\n";
std::cout
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "B(physical - stride: " << tensor_B.stride()[0] << ", extent: " << tensor_B.extent() <<"):\n" << tensor_B_physical << "\n\n";
std::cout
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test kernel
template <typename Mma, typename ThreadblockShape>
__global__ void sparse_kernel(
typename Mma::ElementC *output_C,
typename Mma::ElementA const *input_A,
typename Mma::ElementB const *input_B,
typename Mma::ElementC const *input_C,
typename Mma::ElementE const *input_E,
int iterations = 1) {
// Use AlignedBuffer to store trivially copyable objects in unions and __shared__ buffers.
__shared__ cutlass::AlignedBuffer<typename Mma::ElementA,
ThreadblockShape::kM *
ThreadblockShape::kK / Mma::kSparse>
smem_buffer_A;
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementB, ThreadblockShape::kN * ThreadblockShape::kK> smem_buffer_B;
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementE, Mma::Shape::kM * Mma::Shape::kK /
Mma::kSparse / Mma::kElementsPerElementE>
smem_buffer_E;
__syncthreads();
if (threadIdx.x == 0) {
typename Mma::ElementA *smem_ptr_A = smem_buffer_A.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_A.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementA>::get(smem_ptr_A, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementA>::type>::get(input_A, i);
}
typename Mma::ElementB *smem_ptr_B = smem_buffer_B.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_B.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementB>::get(smem_ptr_B, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementB>::type>::get(input_B, i);
}
typename Mma::ElementE *smem_ptr_E = smem_buffer_E.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_E.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementE>::get(smem_ptr_E, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementE>::type>::get(input_E, i);
}
}
__syncthreads();
//
// Construct warp-level matrix product
//
using FragmentA = typename Mma::FragmentA;
using FragmentB = typename Mma::FragmentB;
using FragmentC = typename Mma::FragmentC;
using FragmentE = typename Mma::FragmentE;
typename Mma::LayoutA layout_A = Mma::LayoutA::packed(
{ThreadblockShape::kM, ThreadblockShape::kK / Mma::kSparse});
typename Mma::LayoutB layout_B =
Mma::LayoutB::packed({ThreadblockShape::kK, ThreadblockShape::kN});
typename Mma::LayoutC layout_C = Mma::LayoutC::packed({Mma::Shape::kM, Mma::Shape::kN});
typename Mma::LayoutE layout_E =
Mma::LayoutE::packed({Mma::Shape::kM * Mma::kInterleaved,
Mma::Shape::kK / Mma::kSparse /
Mma::kElementsPerElementE / Mma::kInterleaved});
typename Mma::IteratorA iter_A({smem_buffer_A.data(), layout_A}, cutlass::arch::LaneId());
typename Mma::IteratorB iter_B({smem_buffer_B.data(), layout_B}, cutlass::arch::LaneId());
typename Mma::IteratorE iter_E({smem_buffer_E.data(), layout_E}, cutlass::arch::LaneId());
FragmentA frag_A;
FragmentB frag_B;
FragmentC accum;
FragmentE frag_E;
Mma mma;
accum.clear();
CUTLASS_PRAGMA_NO_UNROLL
for (int iter = 0; iter < iterations; ++iter) { // place in loop that is not unrolled
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < ThreadblockShape::kK;
k += Mma::Policy::MmaShape::kK) {
iter_A.load(frag_A);
iter_B.load(frag_B);
iter_E.load(frag_E);
++iter_A;
++iter_B;
++iter_E;
mma(accum, frag_A, frag_B, accum, frag_E);
}
}
typename Mma::IteratorC iter_C({output_C, layout_C}, cutlass::arch::LaneId());
iter_C.store(accum);
}
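// Note: in the sparse path operand A is staged in compressed form. With kSparse == 2
// (2:4 structured sparsity), the shared-memory A tile is ThreadblockShape::kM x
// (ThreadblockShape::kK / 2), and each ElementE packs metadata for kElementsPerElementE
// logical elements. As a purely illustrative example, a hypothetical 64x64x64
// threadblock shape would stage a 64x32 compressed A tile plus its metadata tile.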
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Warp-level matrix multiply-accumulate
typename Mma_,
/// Size of threadblock-scoped shape used to store SMEM
typename ThreadblockShape_,
  /// The inner product operation performed by GEMM
typename Operator_ = cutlass::arch::OpMultiplyAdd
>
struct SparseTestbed {
  /// Warp-level matrix multiply-accumulate operator
using Mma = Mma_;
using ThreadblockShape = ThreadblockShape_;
using Operator = Operator_;
using Shape = typename Mma::Shape;
using ElementA = typename Mma::ElementA;
using LayoutA = typename Mma::LayoutA;
using ElementB = typename Mma::ElementB;
using LayoutB = typename Mma::LayoutB;
using ElementC = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
static int const Sparse = Mma::kSparse;
static int const MetaSizeInBits = Mma::kMetaSizeInBits;
static int const MaxID2 = Mma::kMaxID2;
static int const Interleaved = Mma::kInterleaved;
using ElementE = typename Mma::ElementE;
static int const ElementsPerElementE = Mma::kElementsPerElementE;
using LayoutE = cutlass::layout::RowMajor;
using ReorderedLayoutE =
cutlass::layout::ColumnMajorInterleaved<Interleaved>;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementA, LayoutA> tensor_A_uncompressed;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
cutlass::HostTensor<ElementE, LayoutE> tensor_E;
cutlass::HostTensor<ElementE, ReorderedLayoutE> tensor_E_reordered;
//
// Methods
//
/// Allocates workspace in device memory
SparseTestbed() {
tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM,
ThreadblockShape::kK / Sparse));
tensor_A_uncompressed.reset(
cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK));
tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
tensor_E.reset(cutlass::make_Coord(
Shape::kM, Shape::kK / Sparse / ElementsPerElementE));
tensor_E_reordered.reset(cutlass::make_Coord(
Shape::kM, Shape::kK / Sparse / ElementsPerElementE));
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.major == 9) {
// NVIDIA Hopper drops support for several data types
if (
cutlass::sizeof_bits<ElementA>::value < 8 ||
cutlass::sizeof_bits<ElementB>::value < 8 ||
cutlass::sizeof_bits<ElementC>::value < 8) {
return false;
}
}
return true;
}
/// Runs the test
bool run(
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_E = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
tensor_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_A.host_data(),
tensor_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
tensor_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_B.host_data(),
tensor_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
if (init_E == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_E.host_view(), seed, MetaSizeInBits);
} else if (init_E == cutlass::Distribution::Identity) {
uint32_t content = (MaxID2 == 1) ? 0x44444444 : 0x4444;
cutlass::reference::host::TensorFill(tensor_E.host_view(),
(ElementE)(content));
} else {
return false;
}
cutlass::reorder_meta(
tensor_E_reordered.host_ref(), tensor_E.host_ref(),
{Shape::kM, Shape::kN, Shape::kK / Sparse / ElementsPerElementE});
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
tensor_E_reordered.sync_device();
// launch kernel
sparse_kernel<Mma, ThreadblockShape><<< dim3(1, 1), dim3(32, 1, 1) >>>(
tensor_D_computed.device_data(),
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_E_reordered.device_data());
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
cutlass::uncompress(tensor_A_uncompressed.host_ref(), tensor_A.host_ref(),
tensor_E.host_ref(), Shape::kM, Shape::kK);
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC,
Operator>
reference_gemm;
reference_gemm(
{Shape::kM, Shape::kN, ThreadblockShape::kK},
ElementC(1),
tensor_A_uncompressed.host_ref(),
tensor_B.host_ref(),
ElementC(0),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
std::cout <<"cutlass::sizeof_bits<ElementA>::value = "<<cutlass::sizeof_bits<ElementA>::value<<"\n";
std::cout << "A:\n" << tensor_A.host_view() << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementB>::value<<"\n";
std::cout << "B:\n" << tensor_B.host_view() << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementE>::value<<"\n";
std::cout << "E:\n" << tensor_E.host_view() << "\n\n";
std::cout
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace test
| test/unit/gemm/warp/testbed.h/0 | {
"file_path": "test/unit/gemm/warp/testbed.h",
"repo_id": "test",
"token_count": 20479
} | 68 |
#pragma once
#define CUDA_INCLUDE_DIR "@CUDA_TOOLKIT_ROOT_DIR@/include"
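// Note: this is a build-time template; the @CUDA_TOOLKIT_ROOT_DIR@ token is expected to
// be substituted by the build system (e.g. CMake's configure_file()) when the tests are
// configured.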
| test/unit/nvrtc/thread/nvrtc_config.in/0 | {
"file_path": "test/unit/nvrtc/thread/nvrtc_config.in",
"repo_id": "test",
"token_count": 36
} | 69 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/reduction/kernel/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace reduction {
template <typename ReductionKernel>
__global__ void kernel_reduce_splitk(typename ReductionKernel::Params params) {
__shared__ typename ReductionKernel::SharedStorage shared_storage;
ReductionKernel reduction_op;
reduction_op(params, shared_storage);
}
template <typename ReductionKernel>
class ReduceSplitKTestbed {
public:
using ElementAccumulator = typename ReductionKernel::ElementAccumulator;
using ElementWorkspace = typename ReductionKernel::ElementWorkspace;
using ElementOutput = typename ReductionKernel::ElementOutput;
using Layout = cutlass::layout::RowMajor;
public:
cutlass::Distribution::Kind distribution_workspace;
cutlass::Distribution::Kind distribution_source;
uint64_t seed;
public:
/// Ctor
ReduceSplitKTestbed(
cutlass::Distribution::Kind distribution_workspace = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind distribution_source = cutlass::Distribution::Uniform,
uint64_t seed = 2019
):
distribution_workspace(distribution_workspace),
distribution_source(distribution_source),
seed(seed) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomUniform(view, seed, 8, -8, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, -1);
} else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
} else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(),
view.capacity());
} else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Runs a single problem size
bool run(
cutlass::MatrixCoord problem_size,
int partitions,
ElementAccumulator alpha = 1,
ElementAccumulator beta = 0) {
cutlass::HostTensor<ElementWorkspace, Layout> workspace({
problem_size.row() * partitions,
problem_size.column()
});
cutlass::HostTensor<ElementOutput, Layout> source(problem_size);
cutlass::HostTensor<ElementOutput, Layout> destination(problem_size);
cutlass::HostTensor<ElementOutput, Layout> destination_reference(problem_size, false);
//
// Initialize
//
initialize_tensor(workspace.host_view(), distribution_workspace, seed);
initialize_tensor(source.host_view(), distribution_source, seed + 23);
cutlass::reference::host::TensorFill(destination.host_view());
workspace.sync_device();
source.sync_device();
destination.sync_device();
//
// Launch reduction kernel
//
dim3 block = ReductionKernel::block_shape();
dim3 grid = ReductionKernel::grid_shape(problem_size);
typename ReductionKernel::Params params(
problem_size,
partitions,
problem_size.row() * problem_size.column(),
workspace.device_ref(),
destination.device_ref(),
source.device_ref(),
{alpha, beta}
);
test::reduction::kernel_reduce_splitk<ReductionKernel><<< grid, block >>>(params);
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< "CUDA error: " << cudaGetErrorString(result);
destination.sync_host();
//
// Compute reference
//
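    // For each output element, the split-K partials stacked row-wise in the workspace are
    // summed and combined with the source tensor:
    //
    //   D_ref(m, n) = alpha * sum_{k=0..partitions-1} workspace(m + k * M, n) + beta * C(m, n)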
for (int m = 0; m < problem_size.row(); ++m) {
for (int n = 0; n < problem_size.column(); ++n) {
ElementAccumulator accum = 0;
for (int k = 0; k < partitions; ++k) {
accum += ElementAccumulator(workspace.at({m + k * problem_size.row(), n}));
}
ElementAccumulator c = ElementAccumulator(source.at({m, n}));
destination_reference.at({m, n}) = ElementOutput(accum * alpha + beta * c);
}
}
//
// Compare
//
EXPECT_GT(cutlass::reference::host::TensorNorm(destination.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(destination_reference.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
destination.host_view(), destination_reference.host_view());
EXPECT_TRUE(passed)
<< "Workspace =\n" << workspace.host_view() << "\n\n"
<< "\n"
<< "Reference =\n" << destination_reference.host_view() << "\n\n"
<< "Computed =\n" << destination.host_view() << "\n";
return passed;
}
/// Runs through a variety of test cases
bool run_all() {
cutlass::MatrixCoord problem_sizes[] = {
{8, 8},
{136, 72},
{248, 232},
};
int partition_counts[] = {
1,3,4,5,11
};
bool passed = false;
for (cutlass::MatrixCoord problem : problem_sizes) {
for (int partitions : partition_counts) {
passed = run(problem, partitions);
if (!passed) {
return false;
}
}
}
return passed;
}
};
} // namespace reduction
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Strictly F32 data
//
TEST(Reduction_ReduceSplitK, f32_f32_f32_1_1x32) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = float;
int const kN = 1;
using Shape = cutlass::MatrixShape<1, 32>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f32_2_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = float;
int const kN = 2;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f16_2_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::half_t;
int const kN = 2;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f16_8_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::half_t;
int const kN = 8;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/reduction/kernel/reduce_splitk.cu/0 | {
"file_path": "test/unit/reduction/kernel/reduce_splitk.cu",
"repo_id": "test",
"token_count": 3842
} | 70 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Library handle.
*/
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include "cutlass/library/handle.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/util.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructor
Handle::Handle(
cudaStream_t stream,
size_t workspace_size
):
provider_(Provider::kCUTLASS),
stream_(stream),
workspace_(nullptr),
workspace_size_(0),
scalar_pointer_mode_(ScalarPointerMode::kHost),
last_operation_(nullptr) {
int device_idx = -1;
cudaError_t error = cudaGetDevice(&device_idx);
if (error != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() failed");
}
error = cudaGetDeviceProperties(&device_, device_idx);
if (error != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
set_workspace_size(workspace_size);
Singleton::get();
}
/// Destructor
Handle::~Handle() {
  if (workspace_) {
    cudaFree(workspace_);
    workspace_ = nullptr;
    workspace_size_ = 0;
  }
}
/// Move constructor
Handle::Handle(Handle && handle) {
  provider_ = handle.provider_;
  device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
}
/// Move assignment operator
Handle & Handle::operator=(Handle && handle) {
provider_ = handle.provider_;
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
return *this;
}
int Handle::compute_capability() const {
return device_.major * 10 + device_.minor;
}
/// Sets the current CUDA stream
void Handle::set_stream(cudaStream_t stream) {
stream_ = stream;
}
/// Gets the current CUDA stream
cudaStream_t Handle::get_stream() const {
return stream_;
}
/// Gets the current provider
Provider Handle::get_provider() const {
return provider_;
}
/// Sets the provider of operations
void Handle::set_provider(Provider provider) {
provider_ = provider;
}
/// Gets the device workspace size
size_t Handle::get_workspace_size() const {
return workspace_size_;
}
/// Gets a pointer to the device workspace allocation in Global Memory
void *Handle::get_workspace() const {
return workspace_;
}
/// Sets the size of device workspace, invalidating previous calls to get_device_workspace()
void Handle::set_workspace_size(size_t bytes) {
if (bytes != workspace_size_) {
if (workspace_) {
cudaFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = bytes;
if (workspace_size_) {
cudaError_t error = cudaMalloc((void **)&workspace_, workspace_size_);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to allocate workspace");
}
}
}
if (workspace_) {
cudaError_t error = cudaMemset(workspace_, 0, workspace_size_);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to clear workspace");
}
}
}
/// Gets the scalar pointer mode
ScalarPointerMode Handle::get_scalar_pointer_mode() const {
return scalar_pointer_mode_;
}
/// Sets the scalar pointer mode
void Handle::set_scalar_pointer_mode(ScalarPointerMode mode) {
scalar_pointer_mode_ = mode;
}
/// Gets the last operation
Operation const *Handle::get_last_operation() const {
return last_operation_;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the maximum required alignment for each operator
static int maximum_alignment_requirement(GemmDescription const &desc) {
return std::max(
std::max(desc.A.alignment, desc.B.alignment), desc.C.alignment);
}
/// Returns the largest alignment (in units of elements) the problem satisfies, starting from a
/// given upper limit.
static int gemm_problem_alignment(
int M,
int N,
int K,
NumericTypeID element_A,
void const *ptr_A,
int64_t lda,
int64_t batch_stride_A,
NumericTypeID element_B,
void const *ptr_B,
int64_t ldb,
int64_t batch_stride_B,
NumericTypeID element_C,
void const * ptr_C,
int64_t ldc,
int64_t batch_stride_C,
void const * ptr_D,
int64_t ldd,
int64_t batch_stride_D,
int max_alignment_in_bytes = 16
) {
void const *pointers[] = {
ptr_A, ptr_B, ptr_C, ptr_D
};
int64_t extents[] = {
M, N, K, lda, ldb, ldc, ldd, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D
};
NumericTypeID elements[] = {
element_A, element_B, element_C
};
for (; max_alignment_in_bytes > 0; max_alignment_in_bytes /= 2) {
bool satisfied = true;
// Can pointers satisfy this?
for (void const *ptr : pointers) {
std::uintptr_t int_ptr = reinterpret_cast<std::uintptr_t>(ptr);
if (int_ptr % max_alignment_in_bytes) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Compute the maximum alignment based on element data types
int max_element_alignment = 0;
for (NumericTypeID type_id : elements) {
int element_alignment = max_alignment_in_bytes * 8 / library::sizeof_bits(type_id);
max_element_alignment = std::max(max_element_alignment, element_alignment);
}
// Can the problem size and leading dimensions satisfy this?
for (int64_t extent : extents) {
if (extent % max_element_alignment) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Yes
return max_element_alignment;
}
// No alignment satisfies this problem
return 0;
}
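// Worked example (illustrative): with max_alignment_in_bytes == 16 and 16-bit A/B/C
// element types, the candidate element alignment is 16 * 8 / 16 == 8 elements. If every
// pointer is 16-byte aligned and M, N, K and all leading dimensions / batch strides are
// divisible by 8, the function returns 8; otherwise it retries with progressively
// smaller byte alignments and returns 0 if none can be satisfied.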
/// Find the best kernel in descending order of preference.
static Operation const * find_gemm_operation(
GemmOperationFunctionalMap::const_iterator operators_it,
GemmPreferenceKey const preference_key) {
auto cc_it = operators_it->second.upper_bound(preference_key);
if (cc_it == operators_it->second.begin()) {
return nullptr;
}
Operation const *operation = nullptr;
// Search in descending order of compute capability
do {
--cc_it;
// Search tile sizes in order, for now.
for (auto const * op : cc_it->second) {
GemmDescription const &desc = static_cast<GemmDescription const &>(op->description());
int min_cc = desc.tile_description.minimum_compute_capability;
int max_cc = desc.tile_description.maximum_compute_capability;
int op_alignment = maximum_alignment_requirement(desc);
if ((min_cc <= preference_key.compute_capability) &&
(preference_key.compute_capability <= max_cc) &&
(op_alignment <= preference_key.alignment)) {
operation = op;
break;
}
}
} while (!operation && cc_it != operators_it->second.begin());
return operation;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C
Status Handle::gemm(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int64_t lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int64_t ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int64_t ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int64_t ldd /// Leading dimension of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kGemm,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C, // C/D are same type and col major default
LayoutTypeID::kColumnMajor,
element_C,
LayoutTypeID::kColumnMajor
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A, lda, 0,
element_B, ptr_B, ldb, 0,
element_C, ptr_C, ldc, 0,
ptr_D, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmConfiguration configuration{
{M, N, K},
lda,
ldb,
ldc,
ldd,
1
};
// Query host workspace size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
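// Illustrative call sketch for a single-precision, column-major GEMM. Here `handle` is an
// existing Handle, A, B, C, D stand for adequately sized device allocations, and alpha/beta are
// host-side scalars (assuming the handle's scalar pointer mode refers to host memory).
#if 0
float alpha = 1.0f;
float beta  = 0.0f;
Status status = handle.gemm(
  1024, 1024, 1024,                      // M, N, K
  NumericTypeID::kF32,                   // internal accumulation type
  NumericTypeID::kF32,                   // alpha/beta type
  &alpha,
  NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, A, 1024,
  NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, B, 1024,
  &beta,
  NumericTypeID::kF32, C, 1024,
  D, 1024);
#endif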
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C.
//
// Supports batched-strided, batched-array, split-K serial, and split-K parallel modes.
//
Status Handle::gemm_universal(
GemmUniversalMode mode, /// indicates the mode in which the kUniversal GEMM is launched
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int64_t lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int64_t ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C matrix
LayoutTypeID layout_C, /// Layout of D matrix
void const * ptr_C, /// Pointer to C matrix
int64_t ldc, /// Leading dimension of C matrix
NumericTypeID element_D, /// Data type of D matrix
LayoutTypeID layout_D, /// Layout of D matrix
void * ptr_D, /// Pointer to D matrix
int64_t ldd, /// Leading dimension of D matrix
int batch_count, /// Batch count or number of split-K slices
int64_t batch_stride_A, /// Batch stride of A operand
int64_t batch_stride_B, /// Batch stride of B operand
int64_t batch_stride_C, /// Batch stride of C operand
int64_t batch_stride_D /// Batch stride of D operand
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kUniversal,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C,
layout_C,
element_D,
layout_D
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
void const *ptr_A_check = ptr_A;
void const *ptr_B_check = ptr_B;
void const *ptr_C_check = ptr_C;
void * ptr_D_check = ptr_D;
// Ignore alignment of pointers to pointers. We can't check this from the host,
// as each batch index has its own pointer in device memory.
if (mode == GemmUniversalMode::kArray) {
ptr_A_check = nullptr;
ptr_B_check = nullptr;
ptr_C_check = nullptr;
ptr_D_check = nullptr;
}
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A_check, lda, 0,
element_B, ptr_B_check, ldb, 0,
element_C, ptr_C_check, ldc, 0,
ptr_D_check, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmUniversalConfiguration configuration{
mode,
{M, N, K},
batch_count,
lda,
ldb,
ldc,
ldd
};
// Query host workspace size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
GemmUniversalArguments arguments{
{M, N, K},
batch_count,
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_,
lda,
ldb,
ldc,
ldd,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
};
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration, &arguments);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
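// Illustrative sketch of a strided-batched launch (hypothetical buffers, strides and host-side
// alpha/beta): a single call runs `batch_count` independent 512x512x512 problems whose operands
// are separated by the given element strides. Split-K variants use the same entry point with a
// different mode, with batch_count reinterpreted as the number of split-K slices.
#if 0
Status status = handle.gemm_universal(
  GemmUniversalMode::kBatched,
  512, 512, 512,
  NumericTypeID::kF32, NumericTypeID::kF32, &alpha,
  NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, A, 512,
  NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, B, 512,
  &beta,
  NumericTypeID::kF32, LayoutTypeID::kColumnMajor, C, 512,
  NumericTypeID::kF32, LayoutTypeID::kColumnMajor, D, 512,
  /*batch_count=*/8,
  /*batch_stride_A=*/512 * 512,
  /*batch_stride_B=*/512 * 512,
  /*batch_stride_C=*/512 * 512,
  /*batch_stride_D=*/512 * 512);
#endif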
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex GEMM
Status Handle::gemm_planar_complex(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * ptr_A_real, /// Pointer to real part of A matrix
void const * ptr_A_imag, /// Pointer to imaginary part of A matrix
int64_t lda_real, /// Leading dimension of real part of A matrix
int64_t lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * ptr_B_real, /// Pointer to real part of B matrix
void const * ptr_B_imag, /// Pointer to imaginary part of B matrix
int64_t ldb_real, /// Leading dimension of real part of B matrix
int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * ptr_C_real, /// Pointer to real part of C matrix
void const * ptr_C_imag, /// Pointer to imaginary part of C matrix
int64_t ldc_real, /// Leading dimension of real part of C matrix
int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix
void * ptr_D_real, /// Pointer to real part of D matrix
void * ptr_D_imag, /// Pointer to imaginary part of D matrix
int64_t ldd_real, /// Leading dimension of real part of D matrix
int64_t ldd_imag, /// Leading dimension of imaginary part of D matrix
int batch_count, /// Number of batched GEMMs to execute
int64_t batch_stride_A_real,
int64_t batch_stride_A_imag,
int64_t batch_stride_B_real,
int64_t batch_stride_B_imag,
int64_t batch_stride_C_real,
int64_t batch_stride_C_imag,
int64_t batch_stride_D_real,
int64_t batch_stride_D_imag
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplex,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C, // C/D are same type
LayoutTypeID::kColumnMajor,
element_C,
LayoutTypeID::kColumnMajor
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = std::max(
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_real, lda_real, batch_stride_A_real,
element_B, ptr_B_real, ldb_real, batch_stride_B_real,
element_C, ptr_C_real, ldc_real, batch_stride_C_real,
ptr_D_real, ldd_real, batch_stride_D_real, kMaximumAlignmentSize
),
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_imag, lda_imag, batch_stride_A_imag,
element_B, ptr_B_imag, ldb_imag, batch_stride_B_imag,
element_C, ptr_C_imag, ldc_imag, batch_stride_C_imag,
ptr_D_imag, ldd_imag, batch_stride_D_imag, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexConfiguration configuration{
GemmUniversalMode::kBatched,
{M, N, K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host workspace size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArguments arguments{
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A_real,
batch_stride_A_imag,
batch_stride_B_real,
batch_stride_B_imag,
batch_stride_C_real,
batch_stride_C_imag,
batch_stride_D_real,
batch_stride_D_imag
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
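// Illustrative note on operand layout (an assumed arrangement, not a requirement of this API):
// planar complex operands keep their real and imaginary parts in two separate planes addressed
// by independent pointers and leading dimensions. If both planes of a column-major M x K operand
// happen to share one allocation, the imaginary-plane pointer is simply offset by the plane
// size, and alpha/beta are typically complex scalars of the element_scalar type.
#if 0
cutlass::half_t *A_real = A_allocation;               // M x K real plane
cutlass::half_t *A_imag = A_allocation + lda * K;     // M x K imaginary plane
cutlass::complex<float> alpha(1.0f, 0.0f);
cutlass::complex<float> beta(0.0f, 0.0f);
#endif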
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex batched GEMM loading pointers from arrays in global memory
Status Handle::gemm_planar_complex_array(
int expected_M, /// Expected GEMM M dimension (used for sizing CUDA grid)
int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid)
int expected_K, /// Expected GEMM K dimension
int batch_count, /// Number of independent GEMM computations to execute
int const *M, /// Array containing the GEMM M dimension for each batch index
int const *N, /// Array containing the GEMM N dimension for each batch index
int const *K, /// Array containing the GEMM K dimension for each batch index
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices
void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices
int64_t lda_real, /// Leading dimension of real part of A matrix
int64_t lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices
void const * const * ptr_B_imag, /// Pointer to array containing pointers to imaginary part of B matrices
int64_t ldb_real, /// Leading dimension of real part of B matrix
int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices
void const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices
int64_t ldc_real, /// Leading dimension of real part of C matrix
int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix
void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices
void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices
int64_t ldd_real, /// Leading dimension of real part of D matrix
int64_t ldd_imag /// Leading dimension of imaginary part of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplexArray,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C, // C/D are same type
LayoutTypeID::kColumnMajor,
element_C,
LayoutTypeID::kColumnMajor
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = std::max(
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_real, 0,
element_B, nullptr, ldb_real, 0,
element_C, nullptr, ldc_real, 0,
nullptr, ldd_real, 0, kMaximumAlignmentSize
),
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_imag, 0,
element_B, nullptr, ldb_imag, 0,
element_C, nullptr, ldc_imag, 0,
nullptr, ldd_imag, 0, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexArrayConfiguration configuration{
{expected_M, expected_N, expected_K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host workspace size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArrayArguments arguments{
M, N, K,
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Finds conv operation instances with Conv::ElementC = Reduction::ElementWorkspace
Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation) {
ConvDescription const &conv_desc =
static_cast<ConvDescription const &>(operation->description());
// if the current conv operation's accumulator and output data types match, return the operation unchanged
if(conv_desc.tile_description.math_instruction.element_accumulator == conv_desc.C.element) {
return operation;
}
// find conv operation to match conv output and reduction workspace data type
ConvFunctionalKey key(
library::Provider::kCUTLASS,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
// conv operation table for conv2d or conv3d
auto conv_operations = (conv_desc.kind == OperationKind::kConv2d) ?
Singleton::get().operation_table.conv2d_operations :
Singleton::get().operation_table.conv3d_operations;
// find ConvFunctionalKey in convolution operation table
auto operators_it = conv_operations.find(key);
if (operators_it == conv_operations.end()) {
return nullptr;
}
if (operators_it->second.empty()) {
return nullptr;
}
// conv operation for same compute capability and iterator algorithm
ConvPreferenceKey preference_key(
conv_desc.tile_description.minimum_compute_capability,
conv_desc.iterator_algorithm);
auto it = operators_it->second.find(preference_key);
if(it == operators_it->second.end()) {
return nullptr;
}
// return the matching conv operation (same tile sizes and instruction)
for (auto op : it->second) {
if (op->description().tile_description == operation->description().tile_description) {
return op;
}
}
return nullptr;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Finds gemm operation instances with Gemm::ElementC = Reduction::ElementWorkspace
Operation const* find_gemm_operation_for_parallel_reduction(Operation const *operation) {
GemmDescription const &gemm_desc =
static_cast<GemmDescription const &>(operation->description());
// if the current gemm operation's accumulator and output data types match, return the operation unchanged
if(gemm_desc.tile_description.math_instruction.element_accumulator == gemm_desc.D.element) {
return operation;
}
// find gemm operation to match gemm output and reduction workspace data type
GemmFunctionalKey key(
library::Provider::kCUTLASS,
gemm_desc.gemm_kind,
gemm_desc.tile_description.math_instruction.element_accumulator,
gemm_desc.element_epilogue,
gemm_desc.A.element,
gemm_desc.A.layout,
gemm_desc.transform_A,
gemm_desc.B.element,
gemm_desc.B.layout,
gemm_desc.transform_B,
gemm_desc.tile_description.math_instruction.element_accumulator, // C/D are same type
LayoutTypeID::kColumnMajor,
gemm_desc.tile_description.math_instruction.element_accumulator,
LayoutTypeID::kColumnMajor);
// gemm operation table
auto gemm_operations = Singleton::get().operation_table.gemm_operations;
// find GemmFunctionalKey in gemm operation table
auto operators_it = gemm_operations.find(key);
if (operators_it == gemm_operations.end()) {
return nullptr;
}
if (operators_it->second.empty()) {
return nullptr;
}
// gemm operation for same compute capability and max operand alignment
int alignment = std::max(
gemm_desc.A.alignment,
gemm_desc.B.alignment);
GemmPreferenceKey preference_key(
gemm_desc.tile_description.minimum_compute_capability,
alignment);
auto it = operators_it->second.find(preference_key);
if(it == operators_it->second.end()) {
return nullptr;
}
// return the matching gemm operation (same tile shape, stages, warp count, and instruction)
for (auto op : it->second) {
if (op->description().tile_description == operation->description().tile_description) {
return op;
}
}
// return nullptr if no matching gemm operation found for parallel split-k reduction
return nullptr;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// (end of tools/library/src/handle.cu)
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
"Any sufficiently complicated C or Fortran program contains an ad-hoc, informally-specified,
bug-ridden, slow implementation of half of Common Lisp."
- Greenspun's Tenth Rule of Programming
cutlass::profiler::ProblemSpace defines a set of data structures which represent the Cartesian
product of sequences defined by integer ranges, lists of scalars, and sets of enumerated types.
These permit a single invocation of the CUTLASS Profiler to iterate over a large set of problems,
verify and profile various operations when they are compatible with the command line, and
construct data tables of results that are convenient inputs to post processing in Excel or Pandas.
By executing multiple problems per invocation, startup overheads may be amortized across many
kernel launches.
*/
#pragma once
// Standard Library includes
#include <string>
#include <vector>
#include <memory>
#include <unordered_map>
#include <cstdlib>
// CUTLASS Utility includes
#include "cutlass/util/command_line.h"
// CUTLASS Library includes
#include "cutlass/library/library.h"
// Profiler includes
#include "enumerated_types.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the argument schema
struct ArgumentDescription {
/// Type of argument
ArgumentTypeID type;
/// Prioritized array of aliases used in command line parsing
std::vector<std::string> aliases;
/// Description of argument
std::string description;
//
// Methods
//
/// Default ctor
ArgumentDescription():
type(ArgumentTypeID::kInvalid) { }
/// Constructor with aliases
ArgumentDescription(
ArgumentTypeID type_,
std::vector<std::string> const &aliases_,
std::string const &description_
):
type(type_), aliases(aliases_), description(description_) { }
};
/// Vector of arguments
using ArgumentDescriptionVector = std::vector<ArgumentDescription>;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Base class for kernel arguments
struct KernelArgument {
//
// Type definitions
//
/// Value base class
struct Value {
KernelArgument const *argument;
bool not_null;
//
// Methods
//
Value(
KernelArgument const *argument_ = nullptr,
bool not_null_ = true
): argument(argument_), not_null(not_null_) { }
virtual ~Value() { }
virtual std::ostream &print(std::ostream &out) const =0;
};
/// Abstract base class to iterate over values within arguments
struct ValueIterator {
/// Indicates type of kernel argument
KernelArgument const *argument;
/// If the iterator points to an argument that is null, it needs to be distinguished
/// from end.
bool null_argument;
//
// Methods
//
/// Constructs a value iterator - no methods are valid if argument_ == nullptr
ValueIterator(
KernelArgument const *argument_ = nullptr,
bool null_argument_ = false):
argument(argument_), null_argument(null_argument_) {
if (!argument_->not_null()) {
null_argument = true;
}
}
virtual ~ValueIterator() { }
/// Advances to next point in range
virtual void operator++() = 0;
/// Compares against another value iterator - must be of the same KernelArgument type
virtual bool operator==(ValueIterator const &it) const = 0;
/// Returns a unique_ptr<Value> object pointing to a newly created value object
virtual std::unique_ptr<Value> at() const = 0;
/// Gets the type of the iterator
ArgumentTypeID type() const {
return argument->description->type;
}
/// Helper to compute inequality
bool operator!=(ValueIterator const &it) const {
return !(*this == it);
}
std::ostream &print(std::ostream &out) const;
};
//
// Data members
//
/// Describes the argument
ArgumentDescription const *description;
/// Parent node
KernelArgument *parent;
/// Sequence in which the kernel argument is to be iterated over.
/// Smaller means faster changing. -1 is don't care
int ordinal;
//
// Methods
//
/// Default ctor
KernelArgument(
ArgumentDescription const *description_ = nullptr,
KernelArgument *parent_ = nullptr,
int ordinal_ = -1
): description(description_), parent(parent_), ordinal(ordinal_) { }
virtual ~KernelArgument();
/// Returns true if the kernel argument itself is non-empty (i.e. holds at least one value)
virtual bool not_null() const =0;
/// Returns a string name for debugging
std::string qualified_name() const {
if (description) {
if (description->aliases.empty()) {
return "<description_not_null_no_aliases>";
}
return description->aliases.front();
}
return "<description_null>";
}
virtual std::unique_ptr<ValueIterator> begin() const =0;
virtual std::unique_ptr<ValueIterator> end() const =0;
};
using KernelArgumentVector = std::vector<std::unique_ptr<KernelArgument>>;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a scalar argument type as a string that is lexically cast to the appropriate kernel
/// type.
struct ScalarArgument : public KernelArgument {
//
// Type definitions
//
/// Value type
struct ScalarValue : public KernelArgument::Value {
std::string value;
//
// Methods
//
ScalarValue(
std::string const &value_ = "",
ScalarArgument const *argument = nullptr,
bool not_null_ = true
);
virtual std::ostream &print(std::ostream &out) const;
};
using ValueCollection = std::vector<std::string>;
/// Iterator over the scalar values of a ScalarArgument
struct ScalarValueIterator : public KernelArgument::ValueIterator {
//
// Data members
//
ValueCollection::const_iterator value_it;
//
// Methods
//
ScalarValueIterator(ScalarArgument const *argument = nullptr);
virtual void operator++();
virtual bool operator==(ValueIterator const &it) const;
/// Gets the value pointed to
virtual std::unique_ptr<KernelArgument::Value> at() const;
};
//
// Data members
//
/// Set of possible values
ValueCollection values;
//
// Methods
//
/// Default ctor
ScalarArgument(
ArgumentDescription const *description
):
KernelArgument(description) { }
virtual bool not_null() const {
return !values.empty();
}
virtual std::unique_ptr<KernelArgument::ValueIterator> begin() const;
virtual std::unique_ptr<KernelArgument::ValueIterator> end() const;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Closed range supporting additive increment
struct Range {
//
// Type definitions
//
enum class Mode {
kSequence,
kRandom,
kRandomLog2,
kInvalid
};
struct Iterator {
int64_t value;
int64_t increment;
Range const *range;
//
// Methods
//
Iterator(
int64_t value_ = 0,
int64_t increment_ = 1,
Range const *range_ = nullptr
):
value(value_), increment(increment_), range(range_) { }
Iterator & operator++() {
value += increment;
return *this;
}
Iterator operator++(int) {
Iterator self(*this);
++(*this);
return self;
}
bool operator==(Iterator const &it) const {
return value == it.value;
}
bool operator!=(Iterator const &it) const {
return !(*this == it);
}
static int64_t round(int64_t value, int64_t divisible) {
int64_t rem = (value % divisible);
// Round either up or down
if (rem > divisible / 2) {
value += (divisible - rem);
}
else {
value -= rem;
}
return value;
}
int64_t at() const {
if (!range) {
return value;
}
switch (range->mode) {
case Mode::kSequence: return value;
case Mode::kRandom: {
double rnd = double(range->minimum) +
double(std::rand()) / double(RAND_MAX) * (double(range->maximum) - double(range->minimum));
int64_t value = int64_t(rnd);
return round(value, range->divisible);
}
break;
case Mode::kRandomLog2: {
double lg2_minimum = std::log(double(range->minimum)) / std::log(2.0);
double lg2_maximum = std::log(double(range->maximum)) / std::log(2.0);
double rnd = lg2_minimum + double(std::rand()) / double(RAND_MAX) * (lg2_maximum - lg2_minimum);
int64_t value = int64_t(std::pow(2.0, rnd));
return round(value, range->divisible);
}
break;
default: break;
}
return value;
}
int64_t operator*() const {
return at();
}
};
//
// Data members
//
int64_t first; ///< first element in range
int64_t last; ///< last element in range
int64_t increment; ///< additive increment between values
Mode mode; ///< mode selection enables alternative values
int64_t minimum; ///< minimum value to return
int64_t maximum; ///< maximum value to return
int64_t divisible; ///< rounds value down to an integer multiple of this value
//
// Methods
//
/// Default constructor - range acts as a scalar
Range(int64_t first_ = 0): first(first_), last(first_), increment(1), mode(Mode::kSequence), minimum(0), maximum(0), divisible(1) { }
/// Range acts as a range
Range(
int64_t first_,
int64_t last_,
int64_t increment_ = 1,
Mode mode_ = Mode::kSequence,
int64_t minimum_ = 0,
int64_t maximum_ = 0,
int64_t divisible_ = 1
): first(first_), last(last_), increment(increment_), mode(mode_), minimum(minimum_), maximum(maximum_), divisible(divisible_) {
// Helpers to avoid constructing invalid ranges
if (increment > 0) {
if (last < first) {
std::swap(last, first);
}
}
else if (increment < 0) {
if (first < last) {
std::swap(last, first);
}
}
else if (last != first) {
last = first;
increment = 1;
}
}
/// Helper to construct a sequence range
static Range Sequence(int64_t first_, int64_t last_, int64_t increment_ = 1) {
return Range(first_, last_, increment_, Mode::kSequence);
}
/// Helper to construct a range that is a random distribution
static Range Random(int64_t minimum_, int64_t maximum_, int64_t count_, int64_t divisible_ = 1) {
return Range(1, count_, 1, Mode::kRandom, minimum_, maximum_, divisible_);
}
/// Helper to construct a range that is a random distribution over a log scale
static Range RandomLog2(int64_t minimum_, int64_t maximum_, int64_t count_, int64_t divisible_ = 1) {
return Range(1, count_, 1, Mode::kRandomLog2, minimum_, maximum_, divisible_);
}
/// Returns an iterator to the first element within the range
Iterator begin() const {
return Iterator(first, increment, this);
}
/// Returns an iterator to the first element *after* the range
Iterator end() const {
return Iterator(first + ((last - first)/increment + 1) * increment, increment, this);
}
};
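// Worked example: Range::Sequence(256, 1024, 256) visits 256, 512, 768 and 1024; end() sits one
// increment past the last element (1280 here), mirroring standard iterator conventions. Random
// and RandomLog2 ranges instead draw `count_` samples between minimum and maximum, rounded to a
// multiple of `divisible`.
#if 0
Range range = Range::Sequence(256, 1024, 256);
for (Range::Iterator it = range.begin(); it != range.end(); ++it) {
  int64_t extent = *it;    // 256, 512, 768, 1024
  (void)extent;
}
#endif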
/// Integer-valued argument - represented as a list of integer-valued ranges
struct IntegerArgument : public KernelArgument {
//
// Type definitions
//
/// Value type
struct IntegerValue : public KernelArgument::Value {
int64_t value;
//
// Methods
//
IntegerValue(
int64_t value_ = 0,
IntegerArgument const *argument_ = nullptr,
bool not_null_ = true
);
/// Pretty printer for debugging
virtual std::ostream &print(std::ostream &out) const;
};
/// Collection of ranges represent the IntegerArgument's state
using RangeCollection = std::vector<Range>;
/// Iterator over the integer-valued ranges of an IntegerArgument
struct IntegerValueIterator : public KernelArgument::ValueIterator {
//
// Data members
//
RangeCollection::const_iterator range_it;
Range::Iterator value_it;
//
// Methods
//
IntegerValueIterator();
IntegerValueIterator(IntegerArgument const *argument);
virtual void operator++();
virtual bool operator==(ValueIterator const &it) const;
/// Gets the value pointed to
virtual std::unique_ptr<KernelArgument::Value> at() const;
};
//
// Data members
//
/// Set of possible values
RangeCollection ranges;
//
// Methods
//
/// Default ctor
IntegerArgument(
ArgumentDescription const *description
):
KernelArgument(description) { }
virtual bool not_null() const {
bool _not_null = !ranges.empty();
return _not_null;
}
virtual std::unique_ptr<KernelArgument::ValueIterator> begin() const;
virtual std::unique_ptr<KernelArgument::ValueIterator> end() const;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Argument defining the data type, layout, and optional extent/stride of tensor operands
struct TensorArgument : public KernelArgument {
//
// Type definitions
//
struct TensorDescription {
/// Data type of elements
library::NumericTypeID element;
/// Layout definition
library::LayoutTypeID layout;
/// Computed extent
std::vector<int> extent;
/// Enables directly specifying stride value used to size tensor
std::vector<int> stride;
//
// Methods
//
TensorDescription(
library::NumericTypeID element_ = library::NumericTypeID::kUnknown,
library::LayoutTypeID layout_ = library::LayoutTypeID::kUnknown,
std::vector<int> extent_ = std::vector<int>(),
std::vector<int> stride_ = std::vector<int>()
):
element(element_), layout(layout_), extent(extent_), stride(stride_) {}
};
using ValueCollection = std::vector<TensorDescription>;
/// Value structure
struct TensorValue : public KernelArgument::Value {
TensorDescription desc;
//
// Methods
//
TensorValue(
TensorDescription const &desc_ = TensorDescription(),
TensorArgument const *argument_ = nullptr,
bool not_null_ = true
);
/// Pretty printer for debugging
virtual std::ostream &print(std::ostream &out) const;
};
/// Iterator over the tensor descriptions of a TensorArgument
struct TensorValueIterator : public KernelArgument::ValueIterator {
//
// Data members
//
ValueCollection::const_iterator value_it;
//
// Methods
//
TensorValueIterator(TensorArgument const *argument_);
virtual void operator++();
virtual bool operator==(ValueIterator const &it) const;
/// Gets the value pointed to
virtual std::unique_ptr<KernelArgument::Value> at() const;
};
/// Set of possible values
ValueCollection values;
//
// Methods
//
/// Default ctor
TensorArgument(
ArgumentDescription const *description
):
KernelArgument(description) { }
virtual bool not_null() const {
return !values.empty();
}
virtual std::unique_ptr<KernelArgument::ValueIterator> begin() const;
virtual std::unique_ptr<KernelArgument::ValueIterator> end() const;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Argument whose values are drawn from an enumerated type
struct EnumeratedTypeArgument : public KernelArgument {
//
// Type definitions
//
struct EnumeratedTypeValue : public KernelArgument::Value {
/// Data type of element
std::string element;
//
// Methods
//
EnumeratedTypeValue(
std::string const &element_ = std::string(),
EnumeratedTypeArgument const *argument_ = nullptr,
bool not_null_ = true
);
/// Pretty printer for debugging
virtual std::ostream &print(std::ostream &out) const;
};
using ValueCollection = std::vector<std::string>;
/// Iterator over the enumerated values of an EnumeratedTypeArgument
struct EnumeratedTypeValueIterator : public KernelArgument::ValueIterator {
//
// Data members
//
ValueCollection::const_iterator value_it;
//
// Methods
//
EnumeratedTypeValueIterator(EnumeratedTypeArgument const *argument_ = nullptr);
virtual void operator++();
virtual bool operator==(ValueIterator const &it) const;
/// Gets the value pointed to
virtual std::unique_ptr<KernelArgument::Value> at() const;
};
//
// Data members
//
ValueCollection values;
//
// Members
//
/// Default ctor
EnumeratedTypeArgument(ArgumentDescription const *description):
KernelArgument(description) {}
virtual bool not_null() const {
return !values.empty();
}
virtual std::unique_ptr<KernelArgument::ValueIterator> begin() const;
virtual std::unique_ptr<KernelArgument::ValueIterator> end() const;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Object storing the problem space: the set of kernel arguments and the values each may take
class ProblemSpace {
public:
/// Tuple of arguments
using Problem = std::vector<std::unique_ptr<KernelArgument::Value>>;
/// Vector of value iterators, one per kernel argument
using IteratorVector = std::vector<std::unique_ptr<KernelArgument::ValueIterator>>;
/// Iterates over points in the design space
class Iterator {
private:
/// One iterator per argument
IteratorVector iterators;
public:
//
// Methods
//
explicit Iterator();
Iterator(ProblemSpace const &problem_space);
Iterator(Iterator &&it);
// Rule of three
Iterator(Iterator const &) = delete;
Iterator &operator=(Iterator const &it) = delete;
~Iterator() = default;
/// Pre-increment - advances to next point in argument range
void operator++();
/// Gets the current argument value
Problem at() const;
/// Moves iterator to end
void move_to_end();
/// Equality operator
bool operator==(Iterator const &it) const;
/// Inequality operator
bool operator!=(Iterator const &it) const {
return !(*this == it);
}
/// Helper to call at() method
Problem operator*() const {
return at();
}
/// Helper to print iterator state
std::ostream & print(std::ostream &out) const;
private:
/// Helper for recursively constructing iterators
void construct_(KernelArgument const *argument);
};
public:
//
// Data members
//
KernelArgumentVector arguments;
/// Map of argument names to their position within the argument vector
std::unordered_map<std::string, size_t> argument_index_map;
public:
//
// Methods
//
/// Default ctor
ProblemSpace() {}
/// Constructs a problem space from a vector of arguments. This vector must outlive
/// the ProblemSpace object, which stores pointers to objects within the
/// ArgumentDescriptionVector.
ProblemSpace(ArgumentDescriptionVector const &schema, CommandLine const &cmdline);
Iterator begin() const; // returns an iterator to the first point in the range
Iterator end() const; // returns an iterator to the first point after the range
/// Returns the index of an argument by name
size_t argument_index(char const *name) const;
/// Gets all argument names as an ordered vector
std::vector<std::string> argument_names() const;
/// Returns the number of dimensions of the problem space
size_t rank() const { return arguments.size(); }
private:
/// Helper for recursively cloning
void clone_(
KernelArgumentVector &kernel_args,
ArgumentDescription const *arg_desc);
/// Parses command line argument
void parse_(
KernelArgument *arg,
CommandLine const &cmdline);
};
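// Illustrative sketch (assumes `schema` and `cmdline` were built elsewhere and that the schema
// defines an integer argument named "m"): iterating a ProblemSpace visits every point in the
// Cartesian product of its arguments, and the arg_as_* helpers declared below extract individual
// values from one such point.
#if 0
ProblemSpace problem_space(schema, cmdline);
for (ProblemSpace::Iterator prob_it = problem_space.begin(); prob_it != problem_space.end(); ++prob_it) {
  ProblemSpace::Problem problem = *prob_it;   // one value per argument
  int m = 0;
  if (arg_as_int(m, "m", problem_space, problem)) {
    // the hypothetical "m" argument was specified for this point in the problem space
  }
}
#endif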
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Lexically casts an argument to an int if it is defined. Returns true if not null.
bool arg_as_int(int &int_value, KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to an int64 if it is defined. Returns true if not null.
bool arg_as_int(int64_t &int_value, KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to an int if it is defined. Returns true if not null.
bool arg_as_int(
int &int_value,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Lexically casts an argument to an int64 if it is defined. Returns true if not null.
bool arg_as_int(
int64_t &int_value,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Lexically casts an argument to a NumericTypeID if it is defined. Returns true if not null.
bool arg_as_NumericTypeID(library::NumericTypeID &numeric_type, KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to a NumericTypeID if it is defined. Returns true if not null.
bool arg_as_NumericTypeID(
library::NumericTypeID &numeric_type,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Lexically casts an argument to a LayoutTypeID if it is defined. Returns true if not null.
bool arg_as_LayoutTypeID(library::LayoutTypeID &layout_type, KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to a LayoutTypeID if it is defined. Returns true if not null.
bool arg_as_LayoutTypeID(
library::LayoutTypeID &layout_type,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Lexically casts an argument to an OpcodeClassID if it is defined. Returns true if not null.
bool arg_as_OpcodeClassID(library::OpcodeClassID &opcode_class, KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to an OpcodeClassID if it is defined. Returns true if not null.
bool arg_as_OpcodeClassID(
library::OpcodeClassID &opcode_class,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Lexically casts an argument to a SplitKMode if it is defined. Returns true if not null.
bool arg_as_SplitKModeID(library::SplitKMode &split_k_mode, KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to a SplitKMode if it is defined. Returns true if not null.
bool arg_as_SplitKModeID(
library::SplitKMode &split_k_mode,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Lexically casts an argument to a ConvModeID if it is defined. Returns true if not null.
bool arg_as_ConvModeID(library::ConvModeID &conv_mode, KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to a ConvModeID if it is defined. Returns true if not null.
bool arg_as_ConvModeID(
library::ConvModeID &conv_mode,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Lexically casts an argument to an IteratorAlgorithmID if it is defined. Returns true if not null.
bool arg_as_IteratorAlgorithmID(library::IteratorAlgorithmID &iterator_algorithm, KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to an IteratorAlgorithmID if it is defined. Returns true if not null.
bool arg_as_IteratorAlgorithmID(
library::IteratorAlgorithmID &iterator_algorithm,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Lexically casts an argument to a RasterOrder if it is defined. Returns true if not null.
bool arg_as_RasterOrder(library::RasterOrder &raster_order, KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to a RasterOrder if it is defined. Returns true if not null.
bool arg_as_RasterOrder(
library::RasterOrder &raster_order,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Lexically casts an argument to a Provider if it is defined. Returns true if not null.
bool arg_as_ProviderID(library::Provider &provider, KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to a Provider if it is defined. Returns true if not null.
bool arg_as_ProviderID(
library::Provider &provider,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Lexically casts an argument to a given type stored in a byte array. Returns true if not null.
bool arg_as_scalar(
std::vector<uint8_t> &bytes,
library::NumericTypeID numeric_type,
KernelArgument::Value const *value_ptr);
/// Lexically casts an argument to a given type stored in a byte array. Returns true if not null.
bool arg_as_scalar(
std::vector<uint8_t> &bytes,
library::NumericTypeID numeric_type,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Returns true if a tensor description satisfies a `tensor` value
bool tensor_description_satisfies(
library::TensorDescription const &tensor_desc,
TensorArgument::TensorValue const *value_ptr);
/// Returns true if a tensor description satisfies a `tensor` value
bool tensor_description_satisfies(
library::TensorDescription const &tensor_desc,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Returns true if a conv kind satisfies the value
bool conv_kind_satisfies(
library::ConvKind const &conv_kind,
EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr);
/// Returns true if a conv kind satisfies the value
bool conv_kind_satisfies(
library::ConvKind const &conv_kind,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Returns true if a iterator algorithm satisfies the value
bool iterator_algorithm_satisfies(
library::IteratorAlgorithmID const &iterator_algorithm,
EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr);
/// Returns true if a iterator algorithm satisfies the value
bool iterator_algorithm_satisfies(
library::IteratorAlgorithmID const &iterator_algorithm,
char const *name,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////
// (end of tools/profiler/include/cutlass/profiler/problem_space.h)
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Contains code for debugging cutlass code
*/
#pragma once
#include "device_dump.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
/******************************************************************************
* Debug and logging macros
******************************************************************************/
/**
* Formats and prints the given message to stdout
*/
#if !defined(CUDA_LOG)
#if !defined(__CUDA_ARCH__)
#define CUDA_LOG(format, ...) printf(format, __VA_ARGS__)
#else
#define CUDA_LOG(format, ...) \
printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, \
blockIdx.x, \
blockIdx.y, \
blockIdx.z, \
threadIdx.x, \
threadIdx.y, \
threadIdx.z, \
__VA_ARGS__);
#endif
#endif
/**
* Formats and prints the given message to stdout only if DEBUG is defined
*/
#if !defined(CUDA_LOG_DEBUG)
#ifdef DEBUG
#define CUDA_LOG_DEBUG(format, ...) CUDA_LOG(format, __VA_ARGS__)
#else
#define CUDA_LOG_DEBUG(format, ...)
#endif
#endif
/**
* \brief The corresponding error message is printed to \p stderr (or \p stdout in device code)
* along with the supplied source context.
*
* \return The CUDA error.
*/
__host__ CUTLASS_DEVICE cudaError_t cuda_perror_impl(cudaError_t error,
const char* expression,
const char* filename,
int line) {
(void)filename;
(void)line;
if (error) {
#if !defined(__CUDA_ARCH__)
fprintf(
stderr, "CUDA error %d [%s, %d] in expression '%s': %s\n", error, filename, line, expression, cudaGetErrorString(error));
fflush(stderr);
#else
printf("CUDA error %d [%s, %d] in expression '%s'\n", error, filename, line, expression);
#endif
}
return error;
}
/**
* \brief Perror macro
*/
#ifndef CUDA_PERROR
#define CUDA_PERROR(e) cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__)
#endif
/**
* \brief Perror macro with exit
*/
#ifndef CUDA_PERROR_EXIT
#define CUDA_PERROR_EXIT(e) \
do { if (cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__)) { \
exit(1); \
} } while (0)
#endif
/**
* \brief Perror macro only if DEBUG is defined
*/
#ifndef CUDA_PERROR_DEBUG
#ifdef DEBUG
#define CUDA_PERROR_DEBUG(e) CUDA_PERROR(e)
#else
#define CUDA_PERROR_DEBUG(e) (e)
#endif
#endif
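// Illustrative host-side usage: wrap CUDA runtime calls so failures are reported with their
// source location. CUDA_PERROR returns the error code for further handling, while
// CUDA_PERROR_EXIT terminates the process on failure.
#if 0
float *workspace = nullptr;
CUDA_PERROR_EXIT(cudaMalloc((void **)&workspace, sizeof(float) * 1024));
CUDA_PERROR(cudaMemset(workspace, 0, sizeof(float) * 1024));
CUDA_LOG("allocated %d floats\n", 1024);
CUDA_PERROR_EXIT(cudaFree(workspace));
#endif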
////////////////////////////////////////////////////////////////////////////////////////////////////
// A small helper class to dump a type at compile time
// Usage: DebugType<Class>::Class
template <typename T>
struct DebugType {};
template <typename T>
void DebugTypeFunc(T const& t) {
T::t;
}
// A small helper class to dump a compile time constant at compile time
// Usage: DebugValue<Class::kConstant>::kConstant
template <int Value>
struct DebugValue {};
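// Illustrative usage (intentionally ill-formed when enabled): referencing a member that does not
// exist forces the compiler to spell out the template argument in its error message, which is
// the purpose of these helpers. `Gemm` stands for any type whose nested types or constants are
// being inspected.
#if 0
void debug_dump_example() {
  DebugType<typename Gemm::LayoutA>::unknown_member;          // error text names Gemm::LayoutA
  int stages = DebugValue<Gemm::kStages>::unknown_member;     // error text shows the value of kStages
}
#endif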
// (end of tools/util/include/cutlass/util/debug.h)
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/*! \file
    \brief HostTensor provides management for both host and device memory.
HostTensor allocates host and device memory upon construction. Basic element-wise operations on
host memory synchronize device memory automatically. Explicit copy operations provide abstractions
for CUDA memcpy operations.
Call {host, device}_{data, ref, view}() for accessing host or device memory.
See cutlass/tensor_ref.h and cutlass/tensor_view.h for more details.
*/
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/fast_math.h"
#include "device_memory.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Host tensor
template <
/// Data type of element stored within tensor (concept: NumericType)
typename Element_,
/// Defines a mapping from logical coordinate to linear memory (concept: Layout)
typename Layout_
>
class HostTensor {
public:
/// Data type of individual access
using Element = Element_;
/// Mapping function from logical coordinate to linear memory
using Layout = Layout_;
/// Logical rank of tensor index space
static int const kRank = Layout::kRank;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Layout's stride vector
using Stride = typename Layout::Stride;
/// Tensor reference to device memory
using TensorRef = TensorRef<Element, Layout>;
/// Tensor reference to constant device memory
using ConstTensorRef = typename TensorRef::ConstTensorRef;
/// Tensor reference to device memory
using TensorView = TensorView<Element, Layout>;
/// Tensor reference to constant device memory
using ConstTensorView = typename TensorView::ConstTensorView;
/// Reference to element in tensor
using Reference = typename TensorRef::Reference;
/// Constant reference to element in tensor
using ConstReference = typename ConstTensorRef::Reference;
  /// Note: the following constants handle packing of sub-byte elements into wider storage.
  /// kBitsStoredVec : The number of bits in one storage vector, chosen to be divisible by the element width
  /// kElementsPerStoredVec : The number of elements that fit in one storage vector
  /// kNumStoragePerStoredVec : How many units of element storage (i.e. sizeof(element storage)) one storage vector consumes.
  ///                           The element storage type for sub-byte elements is usually uint8_t.
/// Example
/// int2: kBitsStoredVec = 8; kElementsPerStoredVec = 4; kNumStoragePerStoredVec = 1 uint8_t;
/// int4: kBitsStoredVec = 8; kElementsPerStoredVec = 2; kNumStoragePerStoredVec = 1 uint8_t;
static constexpr int kBitsStoredVec = (sizeof_bits<Element>::value < 8) ? cutlass::lcm(sizeof_bits<Element>::value, 8) : sizeof_bits<Element>::value;
static constexpr int kElementsPerStoredVec = kBitsStoredVec / sizeof_bits<Element>::value;
static constexpr int kNumStoragePerStoredVec = kBitsStoredVec / (sizeof(Element) * 8);
static_assert(kBitsStoredVec != 0, "kBitsStoredVec can not be zero");
static_assert(kElementsPerStoredVec != 0, "kElementsPerStoredVec can not be zero");
static_assert(kNumStoragePerStoredVec != 0, "kNumStoragePerStoredVec can not be zero");
private:
//
// Data members
//
/// Extent of tensor in logical dimensions
TensorCoord extent_;
/// Layout object
Layout layout_;
/// Host-side memory allocation
/// avoid the std::vector<bool> specialization
std::vector<std::conditional_t<std::is_same_v<Element,bool>, uint8_t, Element>> host_;
/// Device-side memory
device_memory::allocation<Element> device_;
public:
//
// Device and Host Methods
//
/// Default constructor
HostTensor() {}
/// Constructs a tensor given an extent. Assumes a packed layout
HostTensor(
TensorCoord const &extent,
bool device_backed = true
) {
this->reset(extent, Layout::packed(extent), device_backed);
}
/// Constructs a tensor given an extent and layout
HostTensor(
TensorCoord const &extent,
Layout const &layout,
bool device_backed = true
) {
this->reset(extent, layout, device_backed);
}
~HostTensor() { }
/// Clears the HostTensor allocation to size/capacity = 0
void reset() {
extent_ = TensorCoord();
layout_ = Layout::packed(extent_);
host_.clear();
device_.reset();
}
/// Resizes internal memory allocations without affecting layout or extent
void reserve(
size_t count, ///< size of tensor in elements
bool device_backed_ = true) { ///< if true, device memory is also allocated
device_.reset();
host_.clear();
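    // Round the logical element count up to a whole number of storage vectors, then convert to
    // units of the underlying storage type. This packs sub-byte elements; for full-width element
    // types it leaves the count unchanged.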
count = (count + kElementsPerStoredVec - 1) / kElementsPerStoredVec * kNumStoragePerStoredVec;
host_.resize(count);
// Allocate memory
Element* device_memory = nullptr;
if (device_backed_) {
device_memory = device_memory::allocate<Element>(count);
}
device_.reset(device_memory, device_backed_ ? count : 0);
}
/// Updates the extent and layout of the HostTensor. Allocates memory according to the new
/// extent and layout.
void reset(
TensorCoord const &extent, ///< extent of logical tensor
Layout const &layout, ///< layout object of tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
extent_ = extent;
layout_ = layout;
reserve(size_t(layout_.capacity(extent_)), device_backed_);
}
/// Updates the extent and layout of the HostTensor. Allocates memory according to the new
/// extent and layout. Assumes a packed tensor configuration.
void reset(
TensorCoord const &extent, ///< extent of logical tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
reset(extent, Layout::packed(extent), device_backed_);
}
/// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity.
/// To force allocation, call reset().
void resize(
TensorCoord const &extent, ///< extent of logical tensor
Layout const &layout, ///< layout object of tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
extent_ = extent;
layout_ = layout;
LongIndex new_size = size_t(layout_.capacity(extent_));
if (static_cast<decltype(host_.size())>(new_size) > host_.size()) {
reserve(new_size, device_backed_);
}
}
/// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity.
/// To force allocation, call reset(). Note, this form of resize() assumes a packed tensor configuration.
void resize(
TensorCoord const &extent, ///< extent of logical tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
resize(extent, Layout::packed(extent), device_backed_);
}
/// Returns the number of elements stored in the host tensor
size_t size() const {
return host_.size() / kNumStoragePerStoredVec * kElementsPerStoredVec;
}
/// Returns the logical capacity based on extent and layout. May differ from size().
LongIndex capacity() const {
return layout_.capacity(extent_);
}
/// Gets pointer to host data
Element * host_data() { return reinterpret_cast<Element *>(host_.data()); }
/// Gets pointer to host data with a pointer offset
Element * host_data_ptr_offset(LongIndex ptr_element_offset) { return &ReferenceFactory<Element>::get(host_data(), ptr_element_offset); }
/// Gets a reference to an element in host memory
Reference host_data(LongIndex idx) {
return ReferenceFactory<Element>::get(host_data(), idx);
}
/// Gets pointer to host data
Element const * host_data() const { return reinterpret_cast<Element const *>(host_.data()); }
/// Gets pointer to host data with a pointer offset
Element const * host_data_ptr_offset(LongIndex ptr_element_offset) const { return &ReferenceFactory<Element>::get(host_data(), ptr_element_offset); }
/// Gets a constant reference to an element in host memory
ConstReference host_data(LongIndex idx) const {
return ReferenceFactory<Element const>::get(host_data(), idx);
}
/// Gets pointer to device data
Element * device_data() { return device_.get(); }
/// Gets pointer to device data
Element const * device_data() const { return device_.get(); }
/// Gets pointer to device data with a pointer offset
Element * device_data_ptr_offset(LongIndex ptr_element_offset) { return &ReferenceFactory<Element>::get(device_data(), ptr_element_offset); }
/// Gets pointer to device data with a pointer offset
Element const * device_data_ptr_offset(LongIndex ptr_element_offset) const { return &ReferenceFactory<Element>::get(device_data(), ptr_element_offset); }
/// Accesses the tensor reference pointing to data
TensorRef host_ref(LongIndex ptr_element_offset=0) { return TensorRef(host_data_ptr_offset(ptr_element_offset), layout_); }
/// Accesses the tensor reference pointing to data
ConstTensorRef host_ref(LongIndex ptr_element_offset=0) const { return ConstTensorRef(host_data_ptr_offset(ptr_element_offset), layout_); }
/// Accesses the tensor reference pointing to data
TensorRef device_ref(LongIndex ptr_element_offset=0) {
return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_);
}
/// Accesses the tensor reference pointing to data
ConstTensorRef device_ref(LongIndex ptr_element_offset=0) const {
return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_);
}
/// Accesses the tensor reference pointing to data
TensorView host_view(LongIndex ptr_element_offset=0) {
return TensorView(host_data_ptr_offset(ptr_element_offset), layout_, extent_);
}
/// Accesses the tensor reference pointing to data
ConstTensorView host_view(LongIndex ptr_element_offset=0) const {
return ConstTensorView(host_data_ptr_offset(ptr_element_offset), layout_, extent_);
}
/// Accesses the tensor reference pointing to data
TensorView device_view(LongIndex ptr_element_offset=0) {
return TensorView(device_data_ptr_offset(ptr_element_offset), layout_, extent_);
}
/// Accesses the tensor reference pointing to data
ConstTensorView device_view(LongIndex ptr_element_offset=0) const {
return ConstTensorView(device_data_ptr_offset(ptr_element_offset), layout_, extent_);
}
/// Returns true if device memory is allocated
bool device_backed() const {
return (device_.get() == nullptr) ? false : true;
}
/// Returns the layout object
Layout & layout() {
return layout_;
}
/// Returns the layout object
Layout layout() const {
return layout_;
}
/// Returns the layout object's stride vector
Stride stride() const {
return layout_.stride();
}
/// Returns the layout object's stride vector
Stride & stride() {
return layout_.stride();
}
/// Returns the layout object's stride in a given physical dimension
LongIndex stride(int dim) const {
return layout_.stride().at(dim);
}
/// Returns the layout object's stride in a given physical dimension
LongIndex & stride(int dim) {
return layout_.stride().at(dim);
}
/// Computes the offset of an index from the origin of the tensor
LongIndex offset(TensorCoord const& coord) const {
return layout_(coord);
}
/// Returns a reference to the element at the logical Coord in host memory
Reference at(TensorCoord const& coord) {
return host_data(offset(coord));
}
/// Returns a const reference to the element at the logical Coord in host memory
ConstReference at(TensorCoord const& coord) const {
return host_data(offset(coord));
}
/// Returns the extent of the tensor
TensorCoord extent() const {
return extent_;
}
/// Returns the extent of the tensor
TensorCoord & extent() {
return extent_;
}
/// Copies data from device to host
void sync_host() {
if (device_backed()) {
device_memory::copy_to_host(
host_data(), device_data(), size());
}
}
/// Copies data from host to device
void sync_device() {
if (device_backed()) {
device_memory::copy_to_device(
device_data(), host_data(), size());
}
}
/// Copy data from a caller-supplied device pointer into host memory.
void copy_in_device_to_host(
Element const* ptr_device, ///< source device memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_host(
host_data(), ptr_device, count);
}
  /// Copy data from a caller-supplied device pointer into device memory.
void copy_in_device_to_device(
Element const* ptr_device, ///< source device memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_device_to_device(
device_data(), ptr_device, count);
}
  /// Copy data from a caller-supplied host pointer into device memory.
void copy_in_host_to_device(
Element const* ptr_host, ///< source host memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_device(
device_data(), ptr_host, count);
}
  /// Copy data from a caller-supplied host pointer into host memory.
void copy_in_host_to_host(
Element const* ptr_host, ///< source host memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_host_to_host(
host_data(), ptr_host, count);
}
  /// Copy data from device memory to a caller-supplied host pointer.
  void copy_out_device_to_host(
    Element * ptr_host,            ///< destination host memory
    LongIndex count = -1) const {  ///< number of elements to transfer; if negative, the entire tensor is copied.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_host(
ptr_host, device_data(), count);
}
  /// Copy data from device memory to a caller-supplied device pointer.
  void copy_out_device_to_device(
    Element * ptr_device,          ///< destination device memory
    LongIndex count = -1) const {  ///< number of elements to transfer; if negative, the entire tensor is copied.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_device_to_device(
ptr_device, device_data(), count);
}
  /// Copy data from host memory to a caller-supplied device pointer.
  void copy_out_host_to_device(
    Element * ptr_device,          ///< destination device memory
    LongIndex count = -1) const {  ///< number of elements to transfer; if negative, the entire tensor is copied.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_device(
ptr_device, host_data(), count);
}
  /// Copy data from host memory to a caller-supplied host pointer.
  void copy_out_host_to_host(
    Element * ptr_host,            ///< destination host memory
    LongIndex count = -1) const {  ///< number of elements to transfer; if negative, the entire tensor is copied.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_host_to_host(
ptr_host, host_data(), count);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
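///////////////////////////////////////////////////////////////////////////////////////////////////
//
// A minimal usage sketch of the HostTensor workflow described in the file-level comment. The
// element type, layout, and extents are illustrative assumptions; any supported
// (NumericType, Layout) pair follows the same pattern.
//
//   #include "cutlass/layout/matrix.h"
//   #include "cutlass/util/host_tensor.h"
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({128, 64});  // allocates host + device
//
//   tensor.at({0, 0}) = 1.0f;                   // element-wise access in host memory
//   tensor.sync_device();                       // copy host -> device before launching kernels
//
//   float *device_ptr = tensor.device_data();   // raw device pointer
//   auto   device_ref = tensor.device_ref();    // TensorRef usable by device-side code
//   auto   host_view  = tensor.host_view();     // TensorView over host memory
//
//   tensor.sync_host();                         // copy device -> host after kernels complete
//
///////////////////////////////////////////////////////////////////////////////////////////////////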
| tools/util/include/cutlass/util/host_tensor.h/0 | {
"file_path": "tools/util/include/cutlass/util/host_tensor.h",
"repo_id": "tools",
"token_count": 6056
} | 74 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Reference implementation for complex-valued rank-k update in host-side code.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include <assert.h>
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a rank-k update on matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) for 'initial_accum' can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ConvertOp = NumericConverter<ElementC, ScalarType>,
typename InnerProductOp = multiply_add<ComputeType>
>
void Rank2KComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum,
FillMode fill_mode_c,
BlasMode blas_mode,
int batch_count = 1,
int64_t batch_stride_A = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_D = 0) {
static_assert(
LayoutA::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
  // Batched problems iterate over batch_count below, advancing each tensor by its batch stride.
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
  // Rank-k update operates on A=NxK and C=NxN; the same A supplies both operands
assert(M==N);
// Blocking necessary to speedup reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) {
// Compute matrix product using blocks
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N &&
( (fill_mode_c == FillMode::kLower && row >= col) ||
(fill_mode_c == FillMode::kUpper && row <= col) )
) {
// A x A^T (Symmetric) or A x A^H (Hermitian)
// complex conjugation on operandB (a_t) (function of blas3 computation)
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementA a_t = (blas_mode == BlasMode::kHermitian) ?
conj(tensor_a.at(MatrixCoord(col, k_block))) :
tensor_a.at(MatrixCoord(col, k_block));
ComputeType a_ik = ComputeType(a);
ComputeType b_jk = ComputeType(a_t);
// complex conjugation (function of input layouts)
if (transform_a == ComplexTransform::kConjugate) {
a_ik = conj(a_ik);
}
// complex conjugation (function of input layouts)
if (transform_a == ComplexTransform::kConjugate) {
b_jk = conj(b_jk);
}
accum[i][j] = inner_product_op(a_ik, b_jk, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N &&
((fill_mode_c == FillMode::kLower && row >= col) ||
(fill_mode_c == FillMode::kUpper && row <= col))
) {
ScalarType c = tensor_c.at(coord);
// The imaginary parts of the diagonal elements of
// a complex data type are assumed and set to zero
if (blas_mode == BlasMode::kHermitian) {
c = (row == col) ? real(c) : c;
}
ScalarType tmp_d = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * c);
if (blas_mode == BlasMode::kHermitian && row == col ) {
tensor_d.at(coord) = real(tmp_d);
} else {
tensor_d.at(coord) = tmp_d;
}
}
}
}
} // for (col_block)
} // for (row_block)
tensor_a.add_pointer_offset(batch_stride_A);
tensor_c.add_pointer_offset(batch_stride_C);
tensor_d.add_pointer_offset(batch_stride_D);
} // for (batch_idx)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a rank-k update on matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementC,
typename LayoutC,
typename ScalarType
>
void RankKComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
FillMode fill_mode_c,
BlasMode blas_mode) {
Rank2KComplex(
problem_size, alpha,
tensor_a, transform_a,
beta, tensor_c, tensor_d,
ScalarType(0),
fill_mode_c,
blas_mode);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
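///////////////////////////////////////////////////////////////////////////////////////////////////
//
// A minimal usage sketch of the host reference above. The element types, problem size, and
// scalar values are illustrative assumptions.
//
//   #include "cutlass/layout/matrix.h"
//   #include "cutlass/util/host_tensor.h"
//   #include "cutlass/util/reference/host/rank_k_complex.h"
//
//   using Element = cutlass::complex<float>;
//   using Layout  = cutlass::layout::ColumnMajor;
//
//   int n = 64, k = 32;
//   cutlass::gemm::GemmCoord problem_size(n, n, k);
//
//   cutlass::HostTensor<Element, Layout> A({n, k});
//   cutlass::HostTensor<Element, Layout> C({n, n});
//   cutlass::HostTensor<Element, Layout> D({n, n});
//
//   cutlass::reference::host::RankKComplex(
//     problem_size,
//     Element(1.0f),                                    // alpha
//     A.host_ref(), cutlass::ComplexTransform::kNone,
//     Element(0.0f),                                    // beta
//     C.host_ref(), D.host_ref(),
//     cutlass::FillMode::kLower,
//     cutlass::BlasMode::kHermitian);
//
///////////////////////////////////////////////////////////////////////////////////////////////////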
| tools/util/include/cutlass/util/reference/host/rank_k_complex.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/rank_k_complex.h",
"repo_id": "tools",
"token_count": 3255
} | 75 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Type traits for common CUDA types
*/
#pragma once
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <stdint.h>
#include "cutlass/numeric_types.h"
#include "cutlass/complex.h"
namespace cutlass {
struct half_t;
template <typename T>
struct TypeTraits {
typedef T host_type;
typedef T device_type;
static inline T remove_negative_zero(T x) { return x; }
static inline T to_print(T x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<int8_t> {
static cudaDataType_t const cublas_type = CUDA_R_8I;
typedef int8_t host_type;
typedef int8_t device_type;
typedef int8_t integer_type;
typedef uint8_t unsigned_type;
static inline int8_t remove_negative_zero(int8_t x) { return x; }
static inline int to_print(int8_t x) { return (int)x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<uint8_t> {
static cudaDataType_t const cublas_type = CUDA_R_8I;
typedef uint8_t host_type;
typedef uint8_t device_type;
typedef uint8_t integer_type;
typedef uint8_t unsigned_type;
static inline uint8_t remove_negative_zero(uint8_t x) { return x; }
static inline uint32_t to_print(uint8_t x) { return (uint32_t)x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<int> {
static cudaDataType_t const cublas_type = CUDA_R_32I;
typedef int host_type;
typedef int device_type;
typedef int32_t integer_type;
typedef uint32_t unsigned_type;
static inline int32_t remove_negative_zero(int32_t x) { return x; }
static inline int to_print(int x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<unsigned> {
static cudaDataType_t const cublas_type = CUDA_R_32I;
typedef unsigned host_type;
typedef unsigned device_type;
typedef uint32_t integer_type;
typedef uint32_t unsigned_type;
static inline uint32_t remove_negative_zero(uint32_t x) { return x; }
static inline uint32_t to_print(uint32_t x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<int64_t> {
  static cudaDataType_t const cublas_type = CUDA_R_64I;
typedef int64_t host_type;
typedef int64_t device_type;
typedef int64_t integer_type;
typedef uint64_t unsigned_type;
static inline int64_t remove_negative_zero(int64_t x) { return x; }
static inline int64_t to_print(int64_t x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<uint64_t> {
  static cudaDataType_t const cublas_type = CUDA_R_64U;
typedef uint64_t host_type;
typedef uint64_t device_type;
typedef uint64_t integer_type;
typedef uint64_t unsigned_type;
static inline uint64_t remove_negative_zero(uint64_t x) { return x; }
static inline uint64_t to_print(uint64_t x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<half_t> {
static cudaDataType_t const cublas_type = CUDA_R_16F;
typedef half_t host_type;
typedef half_t device_type;
typedef int16_t integer_type;
typedef uint16_t unsigned_type;
static inline half_t remove_negative_zero(half_t x) {
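    // 0x8000 is the IEEE 754 binary16 bit pattern for -0.0; map it to +0.0.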
return (x.raw() == 0x8000 ? half_t::bitcast(0) : x);
}
static inline half_t to_print(half_t x) { return x; }
static inline device_type to_device(half_t x) { return reinterpret_cast<device_type const &>(x); }
};
template <>
struct TypeTraits<float> {
static cudaDataType_t const cublas_type = CUDA_R_32F;
typedef float host_type;
typedef float device_type;
typedef int32_t integer_type;
typedef uint32_t unsigned_type;
static inline float remove_negative_zero(float x) { return x == -0.f ? 0.f : x; }
static inline float to_print(float x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<double> {
static cudaDataType_t const cublas_type = CUDA_R_64F;
typedef double host_type;
typedef double device_type;
typedef int64_t integer_type;
typedef uint64_t unsigned_type;
static inline double remove_negative_zero(double x) { return x == -0.0 ? 0.0 : x; }
static inline double to_print(double x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex types
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct TypeTraits<complex<half> > {
static cudaDataType_t const cublas_type = CUDA_C_16F;
typedef complex<half_t> host_type;
typedef complex<half> device_type;
typedef int16_t integer_type;
typedef uint16_t unsigned_type;
static inline device_type to_device(complex<half> x) { return reinterpret_cast<device_type const &>(x); }
};
template <>
struct TypeTraits<complex<half_t> > {
static cudaDataType_t const cublas_type = CUDA_C_16F;
typedef complex<half_t> host_type;
typedef complex<half> device_type;
typedef int16_t integer_type;
typedef uint16_t unsigned_type;
static inline complex<half_t> remove_negative_zero(complex<half_t> x) {
return complex<half_t>(
real(x) == -0_hf ? 0_hf : real(x),
imag(x) == -0_hf ? 0_hf : imag(x)
);
}
static inline complex<half_t> to_print(complex<half_t> x) { return x; }
static inline device_type to_device(complex<half_t> x) { return reinterpret_cast<device_type const &>(x); }
};
template <>
struct TypeTraits<complex<float> > {
static cudaDataType_t const cublas_type = CUDA_C_32F;
typedef complex<float> host_type;
typedef complex<float> device_type;
typedef int64_t integer_type;
typedef uint64_t unsigned_type;
static inline complex<float> remove_negative_zero(complex<float> x) {
return complex<float>(
real(x) == -0.f ? 0.f : real(x),
imag(x) == -0.f ? 0.f : imag(x)
);
}
static inline complex<float> to_print(complex<float> x) { return x; }
static inline device_type to_device(complex<float> x) { return reinterpret_cast<device_type const &>(x); }
};
template <>
struct TypeTraits<complex<double> > {
static cudaDataType_t const cublas_type = CUDA_C_64F;
typedef complex<double> host_type;
typedef complex<double> device_type;
struct integer_type { int64_t real, imag; };
struct unsigned_type { uint64_t real, imag; };
static inline complex<double> remove_negative_zero(complex<double> x) {
return complex<double>(
real(x) == -0.0 ? 0.0 : real(x),
imag(x) == -0.0 ? 0.0 : imag(x)
);
}
static inline complex<double> to_print(complex<double> x) { return x; }
static inline device_type to_device(complex<double> x) { return reinterpret_cast<device_type const &>(x); }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
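///////////////////////////////////////////////////////////////////////////////////////////////////
//
// A minimal usage sketch of TypeTraits when bridging host-side CUTLASS types and cuBLAS/CUDA
// data types. The chosen element type is an illustrative assumption.
//
//   using Traits = cutlass::TypeTraits<cutlass::half_t>;
//
//   cudaDataType_t blas_type = Traits::cublas_type;     // CUDA_R_16F for half_t
//   Traits::host_type   h = cutlass::half_t(1.5f);
//   Traits::device_type d = Traits::to_device(h);       // bitwise reinterpretation for device use
//   auto printable = Traits::to_print(Traits::remove_negative_zero(h));
//
///////////////////////////////////////////////////////////////////////////////////////////////////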
| tools/util/include/cutlass/util/type_traits.h/0 | {
"file_path": "tools/util/include/cutlass/util/type_traits.h",
"repo_id": "tools",
"token_count": 2954
} | 76 |
var searchData=
[
['shmem_5frow_5fsize',['SHMEM_ROW_SIZE',['../device__dump_8h.html#a9add1f3dc46bf728b679c7ca472abfe3',1,'device_dump.h']]],
['static_5fassert',['static_assert',['../platform_8h.html#adde4c9ea91b753491851361a4198c009',1,'platform.h']]]
];
| docs/search/defines_3.js/0 | {
"file_path": "docs/search/defines_3.js",
"repo_id": "docs",
"token_count": 124
} | 0 |
var searchData=
[
['opcodeclassid',['OpcodeClassID',['../namespacecutlass_1_1library.html#a6e7f08a7db0273b3da7cc7ec6188b95e',1,'cutlass::library']]],
['operand',['Operand',['../namespacecutlass_1_1gemm.html#a34338284023da7403c9ecbd3f406b2a6',1,'cutlass::gemm']]],
['operationkind',['OperationKind',['../namespacecutlass_1_1library.html#ae609b16f8fa78f39136fc0a9802e4459',1,'cutlass::library']]]
];
| docs/search/enums_7.js/0 | {
"file_path": "docs/search/enums_7.js",
"repo_id": "docs",
"token_count": 179
} | 1 |
var searchData=
[
['default_5fepilogue_5fcomplex_5ftensor_5fop_2eh',['default_epilogue_complex_tensor_op.h',['../default__epilogue__complex__tensor__op_8h.html',1,'']]],
['default_5fepilogue_5fsimt_2eh',['default_epilogue_simt.h',['../default__epilogue__simt_8h.html',1,'']]],
['default_5fepilogue_5ftensor_5fop_2eh',['default_epilogue_tensor_op.h',['../default__epilogue__tensor__op_8h.html',1,'']]],
['default_5fepilogue_5fvolta_5ftensor_5fop_2eh',['default_epilogue_volta_tensor_op.h',['../default__epilogue__volta__tensor__op_8h.html',1,'']]],
['default_5fepilogue_5fwmma_5ftensor_5fop_2eh',['default_epilogue_wmma_tensor_op.h',['../default__epilogue__wmma__tensor__op_8h.html',1,'']]],
['default_5fgemm_2eh',['default_gemm.h',['../default__gemm_8h.html',1,'']]],
['default_5fgemm_5fconfiguration_2eh',['default_gemm_configuration.h',['../default__gemm__configuration_8h.html',1,'']]],
['default_5fgemm_5fsplitk_5fparallel_2eh',['default_gemm_splitk_parallel.h',['../default__gemm__splitk__parallel_8h.html',1,'']]],
['default_5fgemv_2eh',['default_gemv.h',['../default__gemv_8h.html',1,'']]],
['default_5fgemv_5fcore_2eh',['default_gemv_core.h',['../default__gemv__core_8h.html',1,'']]],
['default_5fmma_2eh',['default_mma.h',['../default__mma_8h.html',1,'']]],
['default_5fmma_5fcore_2eh',['default_mma_core.h',['../default__mma__core_8h.html',1,'']]],
['default_5fmma_5fcore_5fsimt_2eh',['default_mma_core_simt.h',['../default__mma__core__simt_8h.html',1,'']]],
['default_5fmma_5fcore_5fsm50_2eh',['default_mma_core_sm50.h',['../default__mma__core__sm50_8h.html',1,'']]],
['default_5fmma_5fcore_5fsm70_2eh',['default_mma_core_sm70.h',['../default__mma__core__sm70_8h.html',1,'']]],
['default_5fmma_5fcore_5fsm75_2eh',['default_mma_core_sm75.h',['../default__mma__core__sm75_8h.html',1,'']]],
['default_5fmma_5fcore_5fwmma_2eh',['default_mma_core_wmma.h',['../default__mma__core__wmma_8h.html',1,'']]],
['default_5fmma_5ftensor_5fop_2eh',['default_mma_tensor_op.h',['../default__mma__tensor__op_8h.html',1,'']]],
['default_5fmma_5fwmma_5ftensor_5fop_2eh',['default_mma_wmma_tensor_op.h',['../default__mma__wmma__tensor__op_8h.html',1,'']]],
['default_5fthread_5fmap_5fsimt_2eh',['default_thread_map_simt.h',['../default__thread__map__simt_8h.html',1,'']]],
['default_5fthread_5fmap_5ftensor_5fop_2eh',['default_thread_map_tensor_op.h',['../default__thread__map__tensor__op_8h.html',1,'']]],
['default_5fthread_5fmap_5fvolta_5ftensor_5fop_2eh',['default_thread_map_volta_tensor_op.h',['../default__thread__map__volta__tensor__op_8h.html',1,'']]],
['default_5fthread_5fmap_5fwmma_5ftensor_5fop_2eh',['default_thread_map_wmma_tensor_op.h',['../default__thread__map__wmma__tensor__op_8h.html',1,'']]],
['device_5fdump_2eh',['device_dump.h',['../device__dump_8h.html',1,'']]],
['device_5fkernel_2eh',['device_kernel.h',['../device__kernel_8h.html',1,'']]],
['device_5fmemory_2eh',['device_memory.h',['../device__memory_8h.html',1,'']]],
['direct_5fepilogue_5ftensor_5fop_2eh',['direct_epilogue_tensor_op.h',['../direct__epilogue__tensor__op_8h.html',1,'']]],
['distribution_2eh',['distribution.h',['../distribution_8h.html',1,'']]],
['doxygen_5fmainpage_2emd',['doxygen_mainpage.md',['../doxygen__mainpage_8md.html',1,'']]],
['gemm_5fbatched_2eh',['gemm_batched.h',['../device_2gemm__batched_8h.html',1,'']]],
['gemm_5fsplitk_5fparallel_2eh',['gemm_splitk_parallel.h',['../device_2gemm__splitk__parallel_8h.html',1,'']]],
['tensor_5fcompare_2eh',['tensor_compare.h',['../device_2tensor__compare_8h.html',1,'']]],
['tensor_5felementwise_2eh',['tensor_elementwise.h',['../device_2kernel_2tensor__elementwise_8h.html',1,'']]],
['tensor_5ffill_2eh',['tensor_fill.h',['../device_2tensor__fill_8h.html',1,'']]],
['tensor_5fforeach_2eh',['tensor_foreach.h',['../device_2kernel_2tensor__foreach_8h.html',1,'']]],
['tensor_5fforeach_2eh',['tensor_foreach.h',['../device_2tensor__foreach_8h.html',1,'']]]
];
| docs/search/files_3.js/0 | {
"file_path": "docs/search/files_3.js",
"repo_id": "docs",
"token_count": 1805
} | 2 |
var searchData=
[
['manifest_2eh',['manifest.h',['../manifest_8h.html',1,'']]],
['matrix_5fcoord_2eh',['matrix_coord.h',['../matrix__coord_8h.html',1,'']]],
['matrix_5fshape_2eh',['matrix_shape.h',['../matrix__shape_8h.html',1,'']]],
['matrix_5ftraits_2eh',['matrix_traits.h',['../matrix__traits_8h.html',1,'']]],
['memory_2eh',['memory.h',['../memory_8h.html',1,'']]],
['memory_5fsm75_2eh',['memory_sm75.h',['../memory__sm75_8h.html',1,'']]],
['mma_5fbase_2eh',['mma_base.h',['../mma__base_8h.html',1,'']]],
['mma_5fcomplex_5ftensor_5fop_2eh',['mma_complex_tensor_op.h',['../mma__complex__tensor__op_8h.html',1,'']]],
['mma_5fpipelined_2eh',['mma_pipelined.h',['../mma__pipelined_8h.html',1,'']]],
['mma_5fsimt_2eh',['mma_simt.h',['../mma__simt_8h.html',1,'']]],
['mma_5fsimt_5fpolicy_2eh',['mma_simt_policy.h',['../mma__simt__policy_8h.html',1,'']]],
['mma_5fsimt_5ftile_5fiterator_2eh',['mma_simt_tile_iterator.h',['../mma__simt__tile__iterator_8h.html',1,'']]],
['mma_5fsinglestage_2eh',['mma_singlestage.h',['../mma__singlestage_8h.html',1,'']]],
['mma_5fsm70_2eh',['mma_sm70.h',['../mma__sm70_8h.html',1,'']]],
['mma_5fsm75_2eh',['mma_sm75.h',['../mma__sm75_8h.html',1,'']]],
['mma_5ftensor_5fop_2eh',['mma_tensor_op.h',['../mma__tensor__op_8h.html',1,'']]],
['mma_5ftensor_5fop_5fpolicy_2eh',['mma_tensor_op_policy.h',['../mma__tensor__op__policy_8h.html',1,'']]],
['mma_5ftensor_5fop_5fsm70_2eh',['mma_tensor_op_sm70.h',['../mma__tensor__op__sm70_8h.html',1,'']]],
['mma_5ftensor_5fop_5ftile_5fiterator_2eh',['mma_tensor_op_tile_iterator.h',['../mma__tensor__op__tile__iterator_8h.html',1,'']]],
['mma_5ftensor_5fop_5ftile_5fiterator_5fsm70_2eh',['mma_tensor_op_tile_iterator_sm70.h',['../mma__tensor__op__tile__iterator__sm70_8h.html',1,'']]],
['mma_5ftensor_5fop_5ftile_5fiterator_5fwmma_2eh',['mma_tensor_op_tile_iterator_wmma.h',['../mma__tensor__op__tile__iterator__wmma_8h.html',1,'']]],
['mma_5ftensor_5fop_5fwmma_2eh',['mma_tensor_op_wmma.h',['../mma__tensor__op__wmma_8h.html',1,'']]]
];
| docs/search/files_b.js/0 | {
"file_path": "docs/search/files_b.js",
"repo_id": "docs",
"token_count": 994
} | 3 |
function convertToId(search)
{
var result = '';
for (i=0;i<search.length;i++)
{
var c = search.charAt(i);
var cn = c.charCodeAt(0);
if (c.match(/[a-z0-9\u0080-\uFFFF]/))
{
result+=c;
}
else if (cn<16)
{
result+="_0"+cn.toString(16);
}
else
{
result+="_"+cn.toString(16);
}
}
return result;
}
function getXPos(item)
{
var x = 0;
if (item.offsetWidth)
{
while (item && item!=document.body)
{
x += item.offsetLeft;
item = item.offsetParent;
}
}
return x;
}
function getYPos(item)
{
var y = 0;
if (item.offsetWidth)
{
while (item && item!=document.body)
{
y += item.offsetTop;
item = item.offsetParent;
}
}
return y;
}
/* A class handling everything associated with the search panel.
Parameters:
name - The name of the global variable that will be
storing this instance. Is needed to be able to set timeouts.
resultPath - path to use for external files
*/
function SearchBox(name, resultsPath, inFrame, label)
{
if (!name || !resultsPath) { alert("Missing parameters to SearchBox."); }
// ---------- Instance variables
this.name = name;
this.resultsPath = resultsPath;
this.keyTimeout = 0;
this.keyTimeoutLength = 500;
this.closeSelectionTimeout = 300;
this.lastSearchValue = "";
this.lastResultsPage = "";
this.hideTimeout = 0;
this.searchIndex = 0;
this.searchActive = false;
this.insideFrame = inFrame;
this.searchLabel = label;
// ----------- DOM Elements
this.DOMSearchField = function()
{ return document.getElementById("MSearchField"); }
this.DOMSearchSelect = function()
{ return document.getElementById("MSearchSelect"); }
this.DOMSearchSelectWindow = function()
{ return document.getElementById("MSearchSelectWindow"); }
this.DOMPopupSearchResults = function()
{ return document.getElementById("MSearchResults"); }
this.DOMPopupSearchResultsWindow = function()
{ return document.getElementById("MSearchResultsWindow"); }
this.DOMSearchClose = function()
{ return document.getElementById("MSearchClose"); }
this.DOMSearchBox = function()
{ return document.getElementById("MSearchBox"); }
// ------------ Event Handlers
// Called when focus is added or removed from the search field.
this.OnSearchFieldFocus = function(isActive)
{
this.Activate(isActive);
}
this.OnSearchSelectShow = function()
{
var searchSelectWindow = this.DOMSearchSelectWindow();
var searchField = this.DOMSearchSelect();
if (this.insideFrame)
{
var left = getXPos(searchField);
var top = getYPos(searchField);
left += searchField.offsetWidth + 6;
top += searchField.offsetHeight;
// show search selection popup
searchSelectWindow.style.display='block';
left -= searchSelectWindow.offsetWidth;
searchSelectWindow.style.left = left + 'px';
searchSelectWindow.style.top = top + 'px';
}
else
{
var left = getXPos(searchField);
var top = getYPos(searchField);
top += searchField.offsetHeight;
// show search selection popup
searchSelectWindow.style.display='block';
searchSelectWindow.style.left = left + 'px';
searchSelectWindow.style.top = top + 'px';
}
// stop selection hide timer
if (this.hideTimeout)
{
clearTimeout(this.hideTimeout);
this.hideTimeout=0;
}
return false; // to avoid "image drag" default event
}
this.OnSearchSelectHide = function()
{
this.hideTimeout = setTimeout(this.name +".CloseSelectionWindow()",
this.closeSelectionTimeout);
}
// Called when the content of the search field is changed.
this.OnSearchFieldChange = function(evt)
{
if (this.keyTimeout) // kill running timer
{
clearTimeout(this.keyTimeout);
this.keyTimeout = 0;
}
var e = (evt) ? evt : window.event; // for IE
if (e.keyCode==40 || e.keyCode==13)
{
if (e.shiftKey==1)
{
this.OnSearchSelectShow();
var win=this.DOMSearchSelectWindow();
for (i=0;i<win.childNodes.length;i++)
{
var child = win.childNodes[i]; // get span within a
if (child.className=='SelectItem')
{
child.focus();
return;
}
}
return;
}
else if (window.frames.MSearchResults.searchResults)
{
var elem = window.frames.MSearchResults.searchResults.NavNext(0);
if (elem) elem.focus();
}
}
else if (e.keyCode==27) // Escape out of the search field
{
this.DOMSearchField().blur();
this.DOMPopupSearchResultsWindow().style.display = 'none';
this.DOMSearchClose().style.display = 'none';
this.lastSearchValue = '';
this.Activate(false);
return;
}
// strip whitespaces
var searchValue = this.DOMSearchField().value.replace(/ +/g, "");
if (searchValue != this.lastSearchValue) // search value has changed
{
if (searchValue != "") // non-empty search
{
// set timer for search update
this.keyTimeout = setTimeout(this.name + '.Search()',
this.keyTimeoutLength);
}
else // empty search field
{
this.DOMPopupSearchResultsWindow().style.display = 'none';
this.DOMSearchClose().style.display = 'none';
this.lastSearchValue = '';
}
}
}
this.SelectItemCount = function(id)
{
var count=0;
var win=this.DOMSearchSelectWindow();
for (i=0;i<win.childNodes.length;i++)
{
var child = win.childNodes[i]; // get span within a
if (child.className=='SelectItem')
{
count++;
}
}
return count;
}
this.SelectItemSet = function(id)
{
var i,j=0;
var win=this.DOMSearchSelectWindow();
for (i=0;i<win.childNodes.length;i++)
{
var child = win.childNodes[i]; // get span within a
if (child.className=='SelectItem')
{
var node = child.firstChild;
if (j==id)
{
node.innerHTML='•';
}
else
{
node.innerHTML=' ';
}
j++;
}
}
}
// Called when an search filter selection is made.
// set item with index id as the active item
this.OnSelectItem = function(id)
{
this.searchIndex = id;
this.SelectItemSet(id);
var searchValue = this.DOMSearchField().value.replace(/ +/g, "");
if (searchValue!="" && this.searchActive) // something was found -> do a search
{
this.Search();
}
}
this.OnSearchSelectKey = function(evt)
{
var e = (evt) ? evt : window.event; // for IE
if (e.keyCode==40 && this.searchIndex<this.SelectItemCount()) // Down
{
this.searchIndex++;
this.OnSelectItem(this.searchIndex);
}
else if (e.keyCode==38 && this.searchIndex>0) // Up
{
this.searchIndex--;
this.OnSelectItem(this.searchIndex);
}
else if (e.keyCode==13 || e.keyCode==27)
{
this.OnSelectItem(this.searchIndex);
this.CloseSelectionWindow();
this.DOMSearchField().focus();
}
return false;
}
// --------- Actions
// Closes the results window.
this.CloseResultsWindow = function()
{
this.DOMPopupSearchResultsWindow().style.display = 'none';
this.DOMSearchClose().style.display = 'none';
this.Activate(false);
}
this.CloseSelectionWindow = function()
{
this.DOMSearchSelectWindow().style.display = 'none';
}
// Performs a search.
this.Search = function()
{
this.keyTimeout = 0;
// strip leading whitespace
var searchValue = this.DOMSearchField().value.replace(/^ +/, "");
var code = searchValue.toLowerCase().charCodeAt(0);
var idxChar = searchValue.substr(0, 1).toLowerCase();
    if ( 0xD800 <= code && code <= 0xDBFF && searchValue.length > 1) // surrogate pair
{
idxChar = searchValue.substr(0, 2);
}
var resultsPage;
var resultsPageWithSearch;
var hasResultsPage;
var idx = indexSectionsWithContent[this.searchIndex].indexOf(idxChar);
if (idx!=-1)
{
var hexCode=idx.toString(16);
resultsPage = this.resultsPath + '/' + indexSectionNames[this.searchIndex] + '_' + hexCode + '.html';
resultsPageWithSearch = resultsPage+'?'+escape(searchValue);
hasResultsPage = true;
}
else // nothing available for this search term
{
resultsPage = this.resultsPath + '/nomatches.html';
resultsPageWithSearch = resultsPage;
hasResultsPage = false;
}
window.frames.MSearchResults.location = resultsPageWithSearch;
var domPopupSearchResultsWindow = this.DOMPopupSearchResultsWindow();
if (domPopupSearchResultsWindow.style.display!='block')
{
var domSearchBox = this.DOMSearchBox();
this.DOMSearchClose().style.display = 'inline';
if (this.insideFrame)
{
var domPopupSearchResults = this.DOMPopupSearchResults();
domPopupSearchResultsWindow.style.position = 'relative';
domPopupSearchResultsWindow.style.display = 'block';
var width = document.body.clientWidth - 8; // the -8 is for IE :-(
domPopupSearchResultsWindow.style.width = width + 'px';
domPopupSearchResults.style.width = width + 'px';
}
else
{
var domPopupSearchResults = this.DOMPopupSearchResults();
var left = getXPos(domSearchBox) + 150; // domSearchBox.offsetWidth;
var top = getYPos(domSearchBox) + 20; // domSearchBox.offsetHeight + 1;
domPopupSearchResultsWindow.style.display = 'block';
left -= domPopupSearchResults.offsetWidth;
domPopupSearchResultsWindow.style.top = top + 'px';
domPopupSearchResultsWindow.style.left = left + 'px';
}
}
this.lastSearchValue = searchValue;
this.lastResultsPage = resultsPage;
}
// -------- Activation Functions
// Activates or deactivates the search panel, resetting things to
// their default values if necessary.
this.Activate = function(isActive)
{
if (isActive || // open it
this.DOMPopupSearchResultsWindow().style.display == 'block'
)
{
this.DOMSearchBox().className = 'MSearchBoxActive';
var searchField = this.DOMSearchField();
if (searchField.value == this.searchLabel) // clear "Search" term upon entry
{
searchField.value = '';
this.searchActive = true;
}
}
else if (!isActive) // directly remove the panel
{
this.DOMSearchBox().className = 'MSearchBoxInactive';
this.DOMSearchField().value = this.searchLabel;
this.searchActive = false;
this.lastSearchValue = ''
this.lastResultsPage = '';
}
}
}
// -----------------------------------------------------------------------
// The class that handles everything on the search results page.
function SearchResults(name)
{
// The number of matches from the last run of <Search()>.
this.lastMatchCount = 0;
this.lastKey = 0;
this.repeatOn = false;
// Toggles the visibility of the passed element ID.
this.FindChildElement = function(id)
{
var parentElement = document.getElementById(id);
var element = parentElement.firstChild;
while (element && element!=parentElement)
{
if (element.nodeName == 'DIV' && element.className == 'SRChildren')
{
return element;
}
if (element.nodeName == 'DIV' && element.hasChildNodes())
{
element = element.firstChild;
}
else if (element.nextSibling)
{
element = element.nextSibling;
}
else
{
do
{
element = element.parentNode;
}
while (element && element!=parentElement && !element.nextSibling);
if (element && element!=parentElement)
{
element = element.nextSibling;
}
}
}
}
this.Toggle = function(id)
{
var element = this.FindChildElement(id);
if (element)
{
if (element.style.display == 'block')
{
element.style.display = 'none';
}
else
{
element.style.display = 'block';
}
}
}
// Searches for the passed string. If there is no parameter,
// it takes it from the URL query.
//
// Always returns true, since other documents may try to call it
// and that may or may not be possible.
this.Search = function(search)
{
if (!search) // get search word from URL
{
search = window.location.search;
search = search.substring(1); // Remove the leading '?'
search = unescape(search);
}
search = search.replace(/^ +/, ""); // strip leading spaces
search = search.replace(/ +$/, ""); // strip trailing spaces
search = search.toLowerCase();
search = convertToId(search);
var resultRows = document.getElementsByTagName("div");
var matches = 0;
var i = 0;
while (i < resultRows.length)
{
var row = resultRows.item(i);
if (row.className == "SRResult")
{
var rowMatchName = row.id.toLowerCase();
rowMatchName = rowMatchName.replace(/^sr\d*_/, ''); // strip 'sr123_'
if (search.length<=rowMatchName.length &&
rowMatchName.substr(0, search.length)==search)
{
row.style.display = 'block';
matches++;
}
else
{
row.style.display = 'none';
}
}
i++;
}
document.getElementById("Searching").style.display='none';
if (matches == 0) // no results
{
document.getElementById("NoMatches").style.display='block';
}
else // at least one result
{
document.getElementById("NoMatches").style.display='none';
}
this.lastMatchCount = matches;
return true;
}
// return the first item with index index or higher that is visible
this.NavNext = function(index)
{
var focusItem;
while (1)
{
var focusName = 'Item'+index;
focusItem = document.getElementById(focusName);
if (focusItem && focusItem.parentNode.parentNode.style.display=='block')
{
break;
}
else if (!focusItem) // last element
{
break;
}
focusItem=null;
index++;
}
return focusItem;
}
this.NavPrev = function(index)
{
var focusItem;
while (1)
{
var focusName = 'Item'+index;
focusItem = document.getElementById(focusName);
if (focusItem && focusItem.parentNode.parentNode.style.display=='block')
{
break;
}
else if (!focusItem) // last element
{
break;
}
focusItem=null;
index--;
}
return focusItem;
}
this.ProcessKeys = function(e)
{
if (e.type == "keydown")
{
this.repeatOn = false;
this.lastKey = e.keyCode;
}
else if (e.type == "keypress")
{
if (!this.repeatOn)
{
if (this.lastKey) this.repeatOn = true;
return false; // ignore first keypress after keydown
}
}
else if (e.type == "keyup")
{
this.lastKey = 0;
this.repeatOn = false;
}
return this.lastKey!=0;
}
this.Nav = function(evt,itemIndex)
{
var e = (evt) ? evt : window.event; // for IE
if (e.keyCode==13) return true;
if (!this.ProcessKeys(e)) return false;
if (this.lastKey==38) // Up
{
var newIndex = itemIndex-1;
var focusItem = this.NavPrev(newIndex);
if (focusItem)
{
var child = this.FindChildElement(focusItem.parentNode.parentNode.id);
if (child && child.style.display == 'block') // children visible
{
var n=0;
var tmpElem;
while (1) // search for last child
{
tmpElem = document.getElementById('Item'+newIndex+'_c'+n);
if (tmpElem)
{
focusItem = tmpElem;
}
else // found it!
{
break;
}
n++;
}
}
}
if (focusItem)
{
focusItem.focus();
}
else // return focus to search field
{
parent.document.getElementById("MSearchField").focus();
}
}
else if (this.lastKey==40) // Down
{
var newIndex = itemIndex+1;
var focusItem;
var item = document.getElementById('Item'+itemIndex);
var elem = this.FindChildElement(item.parentNode.parentNode.id);
if (elem && elem.style.display == 'block') // children visible
{
focusItem = document.getElementById('Item'+itemIndex+'_c0');
}
if (!focusItem) focusItem = this.NavNext(newIndex);
if (focusItem) focusItem.focus();
}
else if (this.lastKey==39) // Right
{
var item = document.getElementById('Item'+itemIndex);
var elem = this.FindChildElement(item.parentNode.parentNode.id);
if (elem) elem.style.display = 'block';
}
else if (this.lastKey==37) // Left
{
var item = document.getElementById('Item'+itemIndex);
var elem = this.FindChildElement(item.parentNode.parentNode.id);
if (elem) elem.style.display = 'none';
}
else if (this.lastKey==27) // Escape
{
parent.searchBox.CloseResultsWindow();
parent.document.getElementById("MSearchField").focus();
}
else if (this.lastKey==13) // Enter
{
return true;
}
return false;
}
this.NavChild = function(evt,itemIndex,childIndex)
{
var e = (evt) ? evt : window.event; // for IE
if (e.keyCode==13) return true;
if (!this.ProcessKeys(e)) return false;
if (this.lastKey==38) // Up
{
if (childIndex>0)
{
var newIndex = childIndex-1;
document.getElementById('Item'+itemIndex+'_c'+newIndex).focus();
}
else // already at first child, jump to parent
{
document.getElementById('Item'+itemIndex).focus();
}
}
else if (this.lastKey==40) // Down
{
var newIndex = childIndex+1;
var elem = document.getElementById('Item'+itemIndex+'_c'+newIndex);
if (!elem) // last child, jump to parent next parent
{
elem = this.NavNext(itemIndex+1);
}
if (elem)
{
elem.focus();
}
}
else if (this.lastKey==27) // Escape
{
parent.searchBox.CloseResultsWindow();
parent.document.getElementById("MSearchField").focus();
}
else if (this.lastKey==13) // Enter
{
return true;
}
return false;
}
}
function setKeyActions(elem,action)
{
elem.setAttribute('onkeydown',action);
elem.setAttribute('onkeypress',action);
elem.setAttribute('onkeyup',action);
}
function setClassAttr(elem,attr)
{
elem.setAttribute('class',attr);
elem.setAttribute('className',attr);
}
function createResults()
{
var results = document.getElementById("SRResults");
for (var e=0; e<searchData.length; e++)
{
var id = searchData[e][0];
var srResult = document.createElement('div');
srResult.setAttribute('id','SR_'+id);
setClassAttr(srResult,'SRResult');
var srEntry = document.createElement('div');
setClassAttr(srEntry,'SREntry');
var srLink = document.createElement('a');
srLink.setAttribute('id','Item'+e);
setKeyActions(srLink,'return searchResults.Nav(event,'+e+')');
setClassAttr(srLink,'SRSymbol');
srLink.innerHTML = searchData[e][1][0];
srEntry.appendChild(srLink);
if (searchData[e][1].length==2) // single result
{
srLink.setAttribute('href',searchData[e][1][1][0]);
if (searchData[e][1][1][1])
{
srLink.setAttribute('target','_parent');
}
var srScope = document.createElement('span');
setClassAttr(srScope,'SRScope');
srScope.innerHTML = searchData[e][1][1][2];
srEntry.appendChild(srScope);
}
else // multiple results
{
srLink.setAttribute('href','javascript:searchResults.Toggle("SR_'+id+'")');
var srChildren = document.createElement('div');
setClassAttr(srChildren,'SRChildren');
for (var c=0; c<searchData[e][1].length-1; c++)
{
var srChild = document.createElement('a');
srChild.setAttribute('id','Item'+e+'_c'+c);
setKeyActions(srChild,'return searchResults.NavChild(event,'+e+','+c+')');
setClassAttr(srChild,'SRScope');
srChild.setAttribute('href',searchData[e][1][c+1][0]);
if (searchData[e][1][c+1][1])
{
srChild.setAttribute('target','_parent');
}
srChild.innerHTML = searchData[e][1][c+1][2];
srChildren.appendChild(srChild);
}
srEntry.appendChild(srChildren);
}
srResult.appendChild(srEntry);
results.appendChild(srResult);
}
}
function init_search()
{
var results = document.getElementById("MSearchSelectWindow");
for (var key in indexSectionLabels)
{
var link = document.createElement('a');
link.setAttribute('class','SelectItem');
link.setAttribute('onclick','searchBox.OnSelectItem('+key+')');
link.href='javascript:void(0)';
link.innerHTML='<span class="SelectionMark"> </span>'+indexSectionLabels[key];
results.appendChild(link);
}
searchBox.OnSelectItem(0);
}
| docs/search/search.js/0 | {
"file_path": "docs/search/search.js",
"repo_id": "docs",
"token_count": 9590
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/reference/device/tensor_relu.h"
#include "cutlass/core_io.h"
#include "cutlass/util/tensor_view_io.h"
#include "reference/device/tensor_scale_bias.h"
#include "helper.h"
#define CHECK_GT(val1, val2) \
  do { \
    if ((val1) <= (val2)) \
      std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_GT failed\n"; \
  } while (0)
#define CHECK_TRUE(val) \
  do { \
    if (!(val)) \
      std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_TRUE failed\n"; \
  } while (0)
template <typename Conv2d0_, typename Conv2d1_>
class B2bNonFusedConv2dRun {
public:
using Conv2d0 = Conv2d0_;
using Conv2d1 = Conv2d1_;
using ElementAccumulator = typename Conv2d0::ElementAccumulator;
using ElementCompute = typename Conv2d0::ElementCompute;
static cutlass::conv::Operator const kConvolutionalOperator = Conv2d0::kConvolutionalOperator;
static_assert(kConvolutionalOperator == Conv2d1::kConvolutionalOperator,
"Fused convolution operators must be the same");
public:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
cutlass::Distribution::Kind init_Bias;
uint64_t seed;
cutlass::HostTensor<typename Conv2d0::ElementA, typename Conv2d0::LayoutA> tensor_A0;
cutlass::HostTensor<typename Conv2d0::ElementB, typename Conv2d0::LayoutB> tensor_B0;
cutlass::HostTensor<typename Conv2d0::ElementC, typename Conv2d0::LayoutC> tensor_C0;
cutlass::HostTensor<typename Conv2d0::ElementCompute, typename Conv2d0::LayoutC> tensor_Bias0;
cutlass::HostTensor<typename Conv2d0::ElementC, typename Conv2d0::LayoutC> tensor_D0_computed;
cutlass::HostTensor<typename Conv2d0::ElementC, typename Conv2d0::LayoutC> tensor_D0_reference;
cutlass::HostTensor<typename Conv2d1::ElementB, typename Conv2d1::LayoutB> tensor_B1;
cutlass::HostTensor<typename Conv2d1::ElementC, typename Conv2d1::LayoutC> tensor_C1;
cutlass::HostTensor<typename Conv2d1::ElementCompute, typename Conv2d0::LayoutC> tensor_Bias1;
cutlass::HostTensor<typename Conv2d1::ElementC, typename Conv2d1::LayoutC> tensor_D1_computed;
cutlass::HostTensor<typename Conv2d1::ElementC, typename Conv2d1::LayoutC> tensor_D1_reference;
public:
B2bNonFusedConv2dRun(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_Bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), init_Bias(init_Bias_), seed(seed_) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
int scope;
int bits = cutlass::sizeof_bits<Element>::value;
if (bits <= 16) {
scope = 2;
}
else {
scope = 8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope, -scope, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
}
else if (dist_kind == cutlass::Distribution::AllZeros) {
cutlass::reference::host::TensorFill(view, Element(0));
}
else if (dist_kind == cutlass::Distribution::AllOnes) {
cutlass::reference::host::TensorFill(view, Element(1));
}
else {
std::cerr << "Not implemented\n";
}
}
void initialize(
cutlass::conv::Conv2dProblemSize const &problem_size_0,
cutlass::conv::Conv2dProblemSize const &problem_size_1,
uint64_t seed = 2019) {
tensor_A0.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size_0));
tensor_B0.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size_0));
tensor_C0.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_0));
tensor_Bias0.resize({1, 1, 1, problem_size_0.K});
tensor_D0_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_0));
tensor_D0_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_0));
tensor_B1.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size_1));
tensor_C1.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_1));
tensor_Bias1.resize({1, 1, 1, problem_size_1.K});
tensor_D1_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_1));
tensor_D1_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_1));
initialize_tensor(tensor_A0.host_view(), init_A, seed);
initialize_tensor(tensor_B0.host_view(), init_B, seed * 17);
initialize_tensor(tensor_C0.host_view(), init_C, seed * 39);
initialize_tensor(tensor_Bias0.host_view(), init_Bias, seed * 83);
initialize_tensor(tensor_B1.host_view(), init_B, seed * 18);
initialize_tensor(tensor_C1.host_view(), init_C, seed * 40);
initialize_tensor(tensor_Bias1.host_view(), init_Bias, seed * 84);
tensor_A0.sync_device();
tensor_B0.sync_device();
tensor_C0.sync_device();
tensor_Bias0.sync_device();
tensor_D0_computed.sync_device();
tensor_D0_reference.sync_device();
tensor_B1.sync_device();
tensor_C1.sync_device();
tensor_Bias1.sync_device();
tensor_D1_computed.sync_device();
tensor_D1_reference.sync_device();
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size_0,
cutlass::conv::Conv2dProblemSize const &problem_size_1,
cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
ElementCompute alpha0 = ElementCompute(1),
ElementCompute beta0 = ElementCompute(0),
ElementCompute alpha1 = ElementCompute(1),
ElementCompute beta1 = ElementCompute(0),
bool relu = true,
int warm_ups = 1,
int runs = 100) {
initialize(problem_size_0, problem_size_1);
// configure the operator
Conv2d0 conv2d_op_0;
Conv2d1 conv2d_op_1;
typename Conv2d0::Arguments conv2d_args_0(
problem_size_0,
tensor_A0.device_ref(),
tensor_B0.device_ref(),
{tensor_Bias0.device_data(), typename Conv2d0::LayoutC::Stride(0)},
tensor_D0_computed.device_ref(),
{alpha0, beta0},
split_k_mode
);
typename Conv2d1::Arguments conv2d_args_1(
problem_size_1,
tensor_D0_computed.device_ref(),
tensor_B1.device_ref(),
{tensor_Bias1.device_data(), typename Conv2d1::LayoutC::Stride(0)},
tensor_D1_computed.device_ref(),
{alpha1, beta1},
split_k_mode
);
cutlass::Status status = conv2d_op_0.initialize(conv2d_args_0);
CUTLASS_CHECK(status);
status = conv2d_op_1.initialize(conv2d_args_1);
CUTLASS_CHECK(status);
for(int i = 0; i < warm_ups; i++) {
status = conv2d_op_0();
CUTLASS_CHECK(status);
status = conv2d_op_1();
CUTLASS_CHECK(status);
}
//
// Run Conv2d
//
cudaEvent_t start, stop1, stop2;
cudaEventCreate(&start);
cudaEventCreate(&stop1);
cudaEventCreate(&stop2);
cudaEventRecord(start);
for(int i = 0; i < runs; i++) {
// run conv2d operator
status = conv2d_op_0();
CUTLASS_CHECK(status);
}
cudaEventRecord(stop1);
for(int i = 0; i < runs; i++) {
// run conv2d operator
status = conv2d_op_1();
CUTLASS_CHECK(status);
}
cudaEventRecord(stop2);
cudaDeviceSynchronize();
float conv2d0Time, conv2d1Time, totalTime;
cudaEventElapsedTime(&conv2d0Time, start, stop1);
cudaEventElapsedTime(&conv2d1Time, stop1, stop2);
cudaEventElapsedTime(&totalTime, start, stop2);
std::cout << "conv2d 0 time " << conv2d0Time / (float)runs << " ms\n";
std::cout << "conv2d 1 time " << conv2d1Time / (float)runs << " ms\n";
std::cout << "Non-fusion time " << totalTime / (float)runs << " ms\n";
tensor_D0_computed.sync_host();
tensor_D1_computed.sync_host();
bool passed = false;
cutlass::reference::device::Conv2d<
typename Conv2d0::ElementA,
typename Conv2d0::LayoutA,
typename Conv2d0::ElementB,
typename Conv2d0::LayoutB,
typename Conv2d0::ElementC,
typename Conv2d0::LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size_0,
tensor_A0.device_ref(),
tensor_B0.device_ref(),
{tensor_Bias0.device_data(), typename Conv2d0::LayoutC::Stride(0)},
tensor_D0_reference.device_ref(),
alpha0,
beta0);
if(relu) {
cutlass::reference::device::TensorReLu(tensor_D0_reference.device_view());
}
cutlass::reference::device::Conv2d<
typename Conv2d1::ElementA,
typename Conv2d1::LayoutA,
typename Conv2d1::ElementB,
typename Conv2d1::LayoutB,
typename Conv2d1::ElementC,
typename Conv2d1::LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size_1,
tensor_D0_reference.device_ref(),
tensor_B1.device_ref(),
{tensor_Bias1.device_data(), typename Conv2d1::LayoutC::Stride(0)},
tensor_D1_reference.device_ref(),
alpha1,
beta1);
if(relu) {
cutlass::reference::device::TensorReLu(tensor_D1_reference.device_view());
}
cudaError_t result = cudaDeviceSynchronize();
CHECK_TRUE(result == cudaSuccess);
// sync host (copy device data to host) for dumping error output in case of mismatches
tensor_D0_reference.sync_host();
tensor_D1_reference.sync_host();
CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D0_computed.host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D0_reference.host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D1_computed.host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D1_reference.host_view()), 0);
passed = cutlass::reference::host::TensorEquals(
tensor_D1_computed.host_view(),
tensor_D1_reference.host_view());
CHECK_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_B2bImplicitGemm_device_nonfused.txt";
std::cerr << "Dumping results in " << fname.str() << "\n";
std::ofstream results(fname.str());
results << problem_size_0 << std::endl;
results << problem_size_1 << std::endl;
results
<< "\nA0:\n" << tensor_A0.host_view() << "\n"
<< "\nB0:\n" << tensor_B0.host_view() << "\n"
<< "\nC0:\n" << tensor_C0.host_view() << "\n"
<< "\nBias0:\n" << tensor_Bias0.host_view() << "\n"
<< "\nD0 reference:\n" << tensor_D0_reference.host_view() << "\n"
<< "\nD0 computed:\n" << tensor_D0_computed.host_view() << "\n"
<< "\nB1:\n" << tensor_B1.host_view() << "\n"
<< "\nC1:\n" << tensor_C1.host_view() << "\n"
<< "\nBias1:\n" << tensor_Bias1.host_view() << "\n"
<< "\nD1 reference:\n" << tensor_D1_reference.host_view() << "\n"
<< "\nD1 computed:\n" << tensor_D1_computed.host_view();
}
return passed;
}
};
template <typename B2bConv2d_>
class B2bFusedConv2dRun {
public:
using B2bConv2d = B2bConv2d_;
using ElementAccumulator = typename B2bConv2d::ElementAccumulator;
using ElementCompute = typename B2bConv2d::ElementCompute;
static cutlass::conv::Operator const kConvolutionalOperator = B2bConv2d::kConvolutionalOperator;
public:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
cutlass::Distribution::Kind init_Scale;
cutlass::Distribution::Kind init_Bias;
uint64_t seed;
cutlass::HostTensor<typename B2bConv2d::ElementA, typename B2bConv2d::LayoutA> tensor_A0;
cutlass::HostTensor<typename B2bConv2d::ElementB, typename B2bConv2d::LayoutB> tensor_B0;
cutlass::HostTensor<typename B2bConv2d::ElementC, typename B2bConv2d::LayoutC> tensor_C0;
cutlass::HostTensor<typename B2bConv2d::ElementScaleBias, typename B2bConv2d::LayoutScaleBias> tensor_Scale0;
cutlass::HostTensor<typename B2bConv2d::ElementScaleBias, typename B2bConv2d::LayoutScaleBias> tensor_Bias0;
cutlass::HostTensor<ElementAccumulator, typename B2bConv2d::LayoutC> tensor_Z0_reference;
cutlass::HostTensor<typename B2bConv2d::ElementC, typename B2bConv2d::LayoutC> tensor_D0_reference;
cutlass::HostTensor<typename B2bConv2d::ElementB, typename B2bConv2d::LayoutB> tensor_B1;
cutlass::HostTensor<typename B2bConv2d::ElementC, typename B2bConv2d::LayoutC> tensor_C1;
cutlass::HostTensor<typename B2bConv2d::ElementCompute, typename B2bConv2d::LayoutC> tensor_Bias1;
cutlass::HostTensor<typename B2bConv2d::ElementC, typename B2bConv2d::LayoutC> tensor_D1_computed;
cutlass::HostTensor<typename B2bConv2d::ElementC, typename B2bConv2d::LayoutC> tensor_D1_reference;
public:
B2bFusedConv2dRun(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_Scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_Bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_),
init_Scale(init_Scale_), init_Bias(init_Bias_), seed(seed_) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
int scope;
int bits = cutlass::sizeof_bits<Element>::value;
if (bits <= 16) {
scope = 2;
}
else {
scope = 8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope, -scope, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
}
else if (dist_kind == cutlass::Distribution::AllZeros) {
cutlass::reference::host::TensorFill(view, Element(0));
}
else if (dist_kind == cutlass::Distribution::AllOnes) {
cutlass::reference::host::TensorFill(view, Element(1));
}
else {
  std::cerr << "Not implemented\n";
}
}
void initialize(
cutlass::conv::Conv2dProblemSize const &problem_size_0,
cutlass::conv::Conv2dProblemSize const &problem_size_1,
ElementCompute alpha0,
ElementCompute alpha1,
uint64_t seed = 2019) {
tensor_A0.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size_0));
tensor_B0.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size_0));
tensor_C0.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_0));
if(alpha0 == ElementCompute(0)) //per-channel scale
tensor_Scale0.resize({1, problem_size_0.K});
tensor_Bias0.resize({1, problem_size_0.K});
tensor_Z0_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_0));
tensor_D0_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_0));
tensor_B1.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size_1));
tensor_C1.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_1));
tensor_Bias1.resize({1, 1, 1, problem_size_1.K});
tensor_D1_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_1));
tensor_D1_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size_1));
initialize_tensor(tensor_A0.host_view(), init_A, seed);
initialize_tensor(tensor_B0.host_view(), init_B, seed * 17);
initialize_tensor(tensor_C0.host_view(), init_C, seed * 39);
if(alpha0 == ElementCompute(0)) //per-channel scale
initialize_tensor(tensor_Scale0.host_view(), init_Scale, seed * 61);
initialize_tensor(tensor_Bias0.host_view(), init_Bias, seed * 83);
initialize_tensor(tensor_B1.host_view(), init_B, seed * 18);
initialize_tensor(tensor_C1.host_view(), init_C, seed * 40);
initialize_tensor(tensor_Bias1.host_view(), init_Bias, seed * 84);
tensor_A0.sync_device();
tensor_B0.sync_device();
tensor_C0.sync_device();
if(alpha0 == ElementCompute(0)) //per-channel scale
tensor_Scale0.sync_device();
tensor_Bias0.sync_device();
tensor_D0_reference.sync_device();
tensor_B1.sync_device();
tensor_C1.sync_device();
tensor_Bias1.sync_device();
tensor_D1_computed.sync_device();
tensor_D1_reference.sync_device();
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size_0,
cutlass::conv::Conv2dProblemSize const &problem_size_1,
cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
ElementCompute alpha0 = ElementCompute(1),
ElementCompute beta0 = ElementCompute(0),
ElementCompute alpha1 = ElementCompute(1),
ElementCompute beta1 = ElementCompute(0),
bool relu = true,
int warm_ups = 1,
int runs = 100) {
initialize(problem_size_0, problem_size_1, alpha0, alpha1);
// configure the operator
B2bConv2d b2b_conv2d_op;
typename B2bConv2d::Arguments b2b_conv2d_args(
problem_size_0,
problem_size_1,
tensor_A0.device_ref(),
tensor_B0.device_ref(),
tensor_C0.device_ref(),
tensor_Scale0.device_ref(),
tensor_Bias0.device_ref(),
tensor_B1.device_ref(),
{tensor_Bias1.device_data(), typename B2bConv2d::LayoutC::Stride(0)},
tensor_D1_computed.device_ref(),
{alpha0, beta0},
{alpha1, beta1},
split_k_mode
);
cutlass::Status status = b2b_conv2d_op.can_implement(b2b_conv2d_args);
if(status != cutlass::Status::kSuccess) {
std::cout << "Problem sizes not supported.\n"
<< "Requirments:\n"
<< " problem_size_0.N*P*Q = problem_size_1.N*P*Q\n"
<< " problem_size_0.K = problem_size_1.C\n"
<< " problem_size_1.R = problem_size_1.S = 1\n"
<< " ThreadblockShape0::kN = problem_size_0.K\n"
<< " ThreadblockShape1::kN = problem_size_1.K" << std::endl;
}
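    // Illustrative example of a problem-size pair that satisfies the requirements above
    // (hypothetical values, not taken from this file), assuming threadblock tiles chosen
    // elsewhere with kN0 = 64 and kN1 = 128:
    //
    //   problem_size_0: N=32, H=56, W=56, C=64, K=64,  R=3, S=3, pad=1, stride=1  -> P=Q=56
    //   problem_size_1: N=32, H=56, W=56, C=64, K=128, R=1, S=1, pad=0, stride=1  -> P=Q=56
    //
    // Both convolutions produce 32*56*56 output pixels, problem_size_0.K == problem_size_1.C == 64,
    // and the second filter is 1x1, so the checks listed above are satisfied.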
CUTLASS_CHECK(status);
status = b2b_conv2d_op.initialize(b2b_conv2d_args);
CUTLASS_CHECK(status);
for(int i = 0; i < warm_ups; i++) {
status = b2b_conv2d_op();
CUTLASS_CHECK(status);
}
//
// Run the Conv2d
//
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for(int i = 0; i < runs; i++) {
// run conv2d operator
status = b2b_conv2d_op();
CUTLASS_CHECK(status);
}
cudaEventRecord(stop);
cudaDeviceSynchronize();
float conv2dTime;
cudaEventElapsedTime(&conv2dTime, start, stop);
std::cout << "Fusion time " << conv2dTime / (float)runs << " ms\n";
tensor_D1_computed.sync_host();
bool passed = false;
cutlass::reference::device::Conv2d<
typename B2bConv2d::ElementA,
typename B2bConv2d::LayoutA,
typename B2bConv2d::ElementB,
typename B2bConv2d::LayoutB,
ElementAccumulator,
typename B2bConv2d::LayoutC,
ElementAccumulator,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size_0,
tensor_A0.device_ref(),
tensor_B0.device_ref(),
tensor_Z0_reference.device_ref(),
tensor_Z0_reference.device_ref(),
ElementAccumulator(1), // intermediate alpha = 1
ElementAccumulator(0) // beta = 0
);
cutlass::reference::device::TensorScaleBiasConv2d<
ElementAccumulator,
typename B2bConv2d::ElementC,
typename B2bConv2d::LayoutC,
ElementCompute,
typename B2bConv2d::LayoutScaleBias
>(
problem_size_0,
tensor_Z0_reference.device_ref(),
tensor_D0_reference.device_ref(),
alpha0,
tensor_Scale0.device_ref(),
tensor_Bias0.device_ref()
);
if(relu) {
cutlass::reference::device::TensorReLu(tensor_D0_reference.device_view());
}
cutlass::reference::device::Conv2d<
typename B2bConv2d::ElementA,
typename B2bConv2d::LayoutA,
typename B2bConv2d::ElementB,
typename B2bConv2d::LayoutB,
typename B2bConv2d::ElementC,
typename B2bConv2d::LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size_1,
tensor_D0_reference.device_ref(),
tensor_B1.device_ref(),
{tensor_Bias1.device_data(), typename B2bConv2d::LayoutC::Stride(0)},
tensor_D1_reference.device_ref(),
alpha1,
beta1);
if(relu) {
cutlass::reference::device::TensorReLu(tensor_D1_reference.device_view());
}
cudaError_t result = cudaDeviceSynchronize();
CHECK_TRUE(result == cudaSuccess);
// sync host (copy device data to host) for dumping error output in case of mismatches
tensor_D0_reference.sync_host();
tensor_D1_reference.sync_host();
CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D0_reference.host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D1_computed.host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D1_reference.host_view()), 0);
passed = cutlass::reference::host::TensorEquals(
tensor_D1_computed.host_view(),
tensor_D1_reference.host_view());
CHECK_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_B2bImplicitGemm_device_fused.txt";
std::cerr << "Dumping results in " << fname.str() << "\n";
std::ofstream results(fname.str());
results << problem_size_0 << std::endl;
results << problem_size_1 << std::endl;
results
<< "\nA0:\n" << tensor_A0.host_view() << "\n"
<< "\nB0:\n" << tensor_B0.host_view() << "\n"
<< "\nC0:\n" << tensor_C0.host_view() << "\n"
<< "\nScale0:\n" << tensor_Scale0.host_view() << "\n"
<< "\nBias0:\n" << tensor_Bias0.host_view() << "\n"
<< "\nB1:\n" << tensor_B1.host_view() << "\n"
<< "\nC1:\n" << tensor_C1.host_view() << "\n"
<< "\nBias1:\n" << tensor_Bias1.host_view() << "\n"
<< "\nD1 reference:\n" << tensor_D1_reference.host_view() << "\n"
<< "\nD1 computed:\n" << tensor_D1_computed.host_view();
}
return passed;
}
};
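// A minimal usage sketch for the two runners above (illustrative only; `B2bConv2d` and the
// problem sizes are hypothetical placeholders for types and values defined by the examples
// that include this header):
//
//   using B2bConv2d = /* a fused back-to-back implicit-GEMM convolution type */;
//
//   cutlass::conv::Conv2dProblemSize problem_size_0(/* first conv, e.g. 3x3 */);
//   cutlass::conv::Conv2dProblemSize problem_size_1(/* second conv, must be 1x1 */);
//
//   B2bFusedConv2dRun<B2bConv2d> fused;
//   bool passed = fused.run(problem_size_0, problem_size_1);   // defaults: serial split-K,
//                                                              // alpha=1, beta=0, ReLU enabled
//
// run() initializes the tensors, times `runs` iterations of the fused kernel, checks the result
// against the unfused device reference, and dumps all tensors to a text file on mismatch.
// B2bNonFusedConv2dRun is driven the same way, but takes two standalone Conv2d kernel types and
// times them back to back for comparison against the fused runner.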
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/b2b_conv2d_run.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/b2b_conv2d_run.h",
"repo_id": "examples",
"token_count": 11067
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
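/*
  Illustration (a reminder added alongside the note above, not part of the kernel definitions):
  a column-major output D can be produced by a row-major kernel through the transpose identity

      D = alpha * op(A) * op(B) + beta * C   ==>   D^T = alpha * op(B)^T * op(A)^T + beta * C^T

  so the A and B operands are swapped, their layouts exchanged, and the kernel computes D^T in
  row-major form, which occupies the same memory as the column-major D.
*/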
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_pipelined.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "kernel/b2b_gemm.h"
#include "kernel/grouped.h"
#include "threadblock/default_b2b_mma.h"
#include "threadblock/grouped_threadblock_swizzle.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <typename T>
using IsGroupedSwizzle = cutlass::gemm::threadblock::detail::IsGroupedSwizzle<T>;
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp0,
/// Epilogue output operator
typename EpilogueOutputOp1,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Stage accumulator in shared memory
bool SmemAccumulator = false,
/// Whether or not the operation is grouped
typename Enable = void
>
struct DefaultB2bGemm;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp0,
/// Epilogue output operator
typename EpilogueOutputOp1,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator>
struct DefaultB2bGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape0, ThreadblockShape1,
WarpShape0, WarpShape1, InstructionShape,
EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, Stages,
Operator, false, typename platform::enable_if<!IsGroupedSwizzle<ThreadblockSwizzle>::value>::type> {
/// Define the threadblock-scoped matrix multiply-accumulate
using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, Stages, Operator, EpilogueOutputOp0>::ThreadblockB2bMma;
static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1,
EpilogueOutputOp1::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>;
};
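// A sketch of how the Sm80 specialization above might be instantiated (hypothetical tile shapes,
// epilogues, and swizzle; the examples in this directory define their own concrete configurations,
// and the ReLU epilogue header would need to be included separately):
//
//   using EpilogueOp0 = cutlass::epilogue::thread::LinearCombinationRelu<
//       cutlass::half_t, 8, float, float>;
//   using EpilogueOp1 = cutlass::epilogue::thread::LinearCombination<
//       cutlass::half_t, 8, float, float>;
//
//   using Kernel = typename DefaultB2bGemm<
//       cutlass::half_t, cutlass::layout::RowMajor, 8,        // A
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,     // B
//       cutlass::half_t, cutlass::layout::RowMajor,           // C / D
//       float,                                                // accumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<64, 64, 32>,                 // threadblock tile, GEMM 0
//       cutlass::gemm::GemmShape<64, 128, 32>,                // threadblock tile, GEMM 1
//       cutlass::gemm::GemmShape<32, 64, 32>,                 // warp tile, GEMM 0
//       cutlass::gemm::GemmShape<32, 128, 32>,                // warp tile, GEMM 1
//       cutlass::gemm::GemmShape<16, 8, 16>,                  // tensor core instruction
//       EpilogueOp0, EpilogueOp1,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                    // stages
//       cutlass::arch::OpMultiplyAdd>::B2bGemmKernel;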
/// Partial specialization for Ampere Architecture with grouped operation
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp0,
/// Epilogue output operator
typename EpilogueOutputOp1,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator>
struct DefaultB2bGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape0, ThreadblockShape1,
WarpShape0, WarpShape1, InstructionShape,
EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, Stages,
Operator, false, typename platform::enable_if<IsGroupedSwizzle<ThreadblockSwizzle>::value>::type> {
/// Define the threadblock-scoped matrix multiply-accumulate
using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, Stages, Operator, EpilogueOutputOp0>::ThreadblockB2bMma;
static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1,
EpilogueOutputOp1::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using UnderlyingB2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>;
using B2bGemmKernel = kernel::GroupedKernel<UnderlyingB2bGemmKernel>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp0,
/// Epilogue output operator
typename EpilogueOutputOp1,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Operation performed by GEMM
typename Operator
>
struct DefaultB2bGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, layout::RowMajor,
ElementAccumulator,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
2,
Operator,
false,
typename platform::enable_if<!IsGroupedSwizzle<ThreadblockSwizzle>::value>::type
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
2,
Operator,
EpilogueOutputOp0
>::ThreadblockB2bMma;
static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape1,
typename B2bMma::Operator1,
kPartitionsK1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>;
};
/// Partial specialization for Ampere Integer Matrix Multiply Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp0,
/// Epilogue output operator
typename EpilogueOutputOp1,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Number of Interleaved k
int InterleavedK,
/// Operation performed by GEMM
typename Operator>
struct DefaultB2bGemm<
ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA,
ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB,
ElementC, layout::ColumnMajorInterleaved<InterleavedK>, int32_t,
arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1,
ThreadblockSwizzle, Stages,
Operator, false, typename platform::enable_if<!IsGroupedSwizzle<ThreadblockSwizzle>::value>::type> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, Stages, Operator, EpilogueOutputOp0,
true>::ThreadblockB2bMma;
static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Integer Tensor Core Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp0,
/// Epilogue output operator
typename EpilogueOutputOp1,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of Interleaved k
int InterleavedK,
/// Operation performed by GEMM
typename Operator>
struct DefaultB2bGemm<ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
kAlignmentA, ElementB,
layout::RowMajorInterleaved<InterleavedK>, kAlignmentB,
ElementC, layout::ColumnMajorInterleaved<InterleavedK>,
int32_t, arch::OpClassTensorOp, arch::Sm75,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1,
ThreadblockSwizzle, 2, Operator, false,
typename platform::enable_if<!IsGroupedSwizzle<ThreadblockSwizzle>::value>::type> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassTensorOp, arch::Sm75, ThreadblockShape0, ThreadblockShape1,
WarpShape0, WarpShape1, InstructionShape, 2, Operator, EpilogueOutputOp0, true>::ThreadblockB2bMma;
static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK;
/// Define the epilogue for the 2nd Gemm
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>;
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| examples/13_two_tensor_op_fusion/kernel/default_b2b_gemm.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/default_b2b_gemm.h",
"repo_id": "examples",
"token_count": 6675
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined GEMM kernel. Does not support batching or split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h"
#include "threadblock/b2b_mma_pipelined_smem_accumulator.h"
#include "threadblock/b2b_mma_multistage_smem_accumulator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output with 2-stage pipeline
/// Accumulator will be staged in shared memory.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Epilogue output operator
typename EpilogueOutputOp>
struct DefaultB2bMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, ArchTag,
ThreadblockShape0, ThreadblockShape1,
WarpShape0, WarpShape1,
InstructionShape, 2, Operator, EpilogueOutputOp, false, true> {
// Define the MmaCore components
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, 2, Operator>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA0 =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore0::Shape::kM, MmaCore0::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore0::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB0 =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore0::Shape::kK, MmaCore0::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore0::IteratorThreadMapB, kAlignmentB>;
// Define iterators over tiles from the B operand
using IteratorB1 =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore1::Shape::kK, MmaCore1::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore1::IteratorThreadMapB, kAlignmentB>;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::RowMajor;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 2;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
typename EpilogueOutputOp::ElementOutput,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileAccessIterator<
MatrixShape<WarpShape1::kM, WarpShape1::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount, true>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockB2bMma = cutlass::gemm::threadblock::B2bMmaPipelinedSmemAccumulator<
typename MmaCore0::Shape, IteratorA0, typename MmaCore0::SmemIteratorA,
IteratorB0, typename MmaCore0::SmemIteratorB,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator, SmemIteratorD0,
typename MmaCore1::Shape, WarpIteratorA1,
IteratorB1, typename MmaCore1::SmemIteratorB,
ElementAccumulator, layout::RowMajor,
EpilogueOutputOp,
typename MmaCore0::MmaPolicy, typename MmaCore1::MmaPolicy>;
};
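// Data flow realized by this specialization, sketched as pseudocode (the bracketed names refer
// to the types defined above; this is an explanatory sketch, not additional code):
//
//   for each threadblock tile:
//     accum0  = GEMM0(A0 via [IteratorA0], B0 via [IteratorB0])                    // tensor cores
//     frag    = [FragmentIteratorAccumulator] walk over accum0
//     frag    = EpilogueOutputOp(frag, scale/bias via [IteratorAccumulatorScaleBias])
//     [SmemIteratorD0] stores frag to shared memory (D0 staged as the next A operand)
//     A1      = [WarpIteratorA1] loads warp tiles of D0 back from shared memory
//     accum1 += GEMM1(A1, B1 via [IteratorB1])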
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output for multi-stage
/// Accumulator will be staged in shared memory.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Epilogue output operator
typename EpilogueOutputOp>
struct DefaultB2bMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, ArchTag,
ThreadblockShape0, ThreadblockShape1,
WarpShape0, WarpShape1,
InstructionShape, Stages, Operator, EpilogueOutputOp, false, true> {
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * kAlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * kAlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
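  // Example of the cache-operation rule above (illustrative): half_t operands (16 bits) with an
  // alignment of 8 elements give 16 * 8 = 128-bit accesses, which can use cp.async with the
  // L2-only .cg hint (CacheOperation::Global); narrower accesses fall back to the .ca hint
  // (CacheOperation::Always), which caches at all levels.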
// Define the MmaCore components
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, Operator, false, CacheOpA, CacheOpB>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, Operator, false, CacheOpA, CacheOpB>;
// Define iterators over tiles from the A operand
using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
using AccessTypeA0 = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA0 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, LayoutA, 1, ThreadMapA0, AccessTypeA0>;
// Define iterators over tiles from the B operand
using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
using AccessTypeB0 = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB0 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, LayoutB, 0, ThreadMapB0, AccessTypeB0>;
// Define iterators over tiles from the B operand
using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
using AccessTypeB1 = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB1 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, LayoutB, 0, ThreadMapB1, AccessTypeB1>;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::RowMajor;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 2;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
typename EpilogueOutputOp::ElementOutput,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileAccessIterator<
MatrixShape<WarpShape1::kM, WarpShape1::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount, true>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockB2bMma = cutlass::gemm::threadblock::B2bMmaMultistageSmemAccumulator<
typename MmaCore0::Shape, IteratorA0, typename MmaCore0::SmemIteratorA,
MmaCore0::kCacheOpA,
IteratorB0, typename MmaCore0::SmemIteratorB, MmaCore0::kCacheOpB,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator, SmemIteratorD0,
typename MmaCore1::Shape, WarpIteratorA1,
IteratorB1, typename MmaCore1::SmemIteratorB, MmaCore1::kCacheOpB,
ElementAccumulator, layout::RowMajor,
EpilogueOutputOp,
typename MmaCore0::MmaPolicy, typename MmaCore1::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output with 2-stage pipeline
/// Accumulator will be staged in shared memory.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Number of Interleaved K
int InterleavedK>
struct DefaultB2bMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, arch::Sm75,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, 2, Operator, EpilogueOutputOp, true, true> {
// Define the MmaCore components
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, 2, Operator,
true>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, 2, Operator,
true>;
static_assert(kAlignmentA == 128 / sizeof_bits<ElementA>::value,
"Alignment must match thread data map's vector length");
static_assert(kAlignmentB == 128 / sizeof_bits<ElementB>::value,
"Alignment must match thread data map's vector length");
// Define iterators over tiles from the A operand
using IteratorA0 = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore0::Shape::kM, MmaCore0::Shape::kK>, ElementA,
LayoutA, 1, typename MmaCore0::IteratorThreadMapA>;
// Define iterators over tiles from the B operand
using IteratorB0 = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore0::Shape::kK, MmaCore0::Shape::kN>, ElementB,
LayoutB, 0, typename MmaCore0::IteratorThreadMapB>;
// Define iterators over tiles from the B operand
using IteratorB1 =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore1::Shape::kK, MmaCore1::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore1::IteratorThreadMapB>;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::ColumnMajorInterleaved<16>;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 4; //For interleaved layout
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
typename EpilogueOutputOp::ElementOutput,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileAccessIterator<
MatrixShape<WarpShape1::kM, WarpShape1::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount, true>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockB2bMma = cutlass::gemm::threadblock::B2bMmaPipelinedSmemAccumulator<
typename MmaCore0::Shape, IteratorA0, typename MmaCore0::SmemIteratorA,
IteratorB0, typename MmaCore0::SmemIteratorB,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator, SmemIteratorD0,
typename MmaCore1::Shape, WarpIteratorA1,
IteratorB1, typename MmaCore1::SmemIteratorB,
ElementAccumulator, layout::ColumnMajorInterleaved<InterleavedK>,
EpilogueOutputOp,
typename MmaCore0::MmaPolicy, typename MmaCore1::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output with multi-stage
/// Accumulator will be staged in shared memory.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Number of Interleaved K
int InterleavedK>
struct DefaultB2bMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, ArchTag,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, Stages, Operator, EpilogueOutputOp, true, true> {
// Define the MmaCore components
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, Stages,
Operator, true>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, Stages,
Operator, true>;
// Define iterators over tiles from the A operand
using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA0 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, LayoutA, 1, ThreadMapA0, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB0 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, LayoutB, 0, ThreadMapB0, AccessTypeB>;
// Define iterators over tiles from the B operand of the second GEMM
using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
using IteratorB1 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, LayoutB, 0, ThreadMapB1, AccessTypeB>;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::ColumnMajorInterleaved<16>;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 4;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
typename EpilogueOutputOp::ElementOutput,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileAccessIterator<
MatrixShape<WarpShape1::kM, WarpShape1::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount, true >;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockB2bMma = cutlass::gemm::threadblock::B2bMmaMultistageSmemAccumulator<
typename MmaCore0::Shape, IteratorA0, typename MmaCore0::SmemIteratorA,
MmaCore0::kCacheOpA,
IteratorB0, typename MmaCore0::SmemIteratorB, MmaCore0::kCacheOpB,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator, SmemIteratorD0,
typename MmaCore1::Shape, WarpIteratorA1,
IteratorB1, typename MmaCore1::SmemIteratorB, MmaCore1::kCacheOpB,
ElementAccumulator, layout::ColumnMajorInterleaved<InterleavedK>,
EpilogueOutputOp,
typename MmaCore0::MmaPolicy, typename MmaCore1::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/threadblock/default_b2b_mma_smem_accumulator.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/default_b2b_mma_smem_accumulator.h",
"repo_id": "examples",
"token_count": 9543
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example requires an NVIDIA Maxwell GPU or later.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// CUTLASS Includes
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/functional.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/epilogue/warp/fragment_iterator_simt.h"
#include "cutlass/epilogue/warp/tile_iterator_simt.h"
// CUTLASS Utility Includes
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/gemm_complex.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// Define the overall warp-level problem shape
int const kM = 14;
int const kN = 27;
int const kK = 17;
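// Note: these extents are not multiples of the 16x32x8 warp tile used below, so the
// canonical iterators are exercised on partial tiles rather than a perfectly tiled problem.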
///////////////////////////////////////////////////////////////////////////////////////////////////
// Define a warp-level GEMM operator.
//
// This template could be part of the CUTLASS Template Library or implemented internally. This
// wraps the matrix multiply operation and epilogue with a GEMM-like interface that can be
// instantiated in device code.
namespace cutlass {
namespace gemm {
namespace warp {
template <
typename Shape,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementScalar
>
class GemmSimt {
public:
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<4, 8>,
cutlass::layout::RowMajorInterleaved<2>,
cutlass::gemm::GemmShape<4, 4, 1>
>;
using MmaWarp = cutlass::gemm::warp::MmaSimt<
cutlass::gemm::GemmShape<16, 32, 8>,
float,
cutlass::layout::RowMajor,
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::RowMajor,
Policy
>;
// Number of 'K groups'
int const kKgroups = Shape::kK;
using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt<
typename MmaWarp::Shape,
typename MmaWarp::ThreadMma,
layout::RowMajor, // SMEM layout
typename MmaWarp::Policy
>;
using AccumulatorTileIterator = cutlass::epilogue::warp::TileIteratorSimtCanonical<
typename MmaWarp::Shape,
typename MmaWarp::ThreadMma,
float, // ElementAccumulator
layout::RowMajor, // SMEM layout
typename MmaWarp::Policy
>;
using TensorRefA = typename MmaWarp::IteratorA::TensorRef;
using TensorRefB = typename MmaWarp::IteratorB::TensorRef;
using TensorRefC = typename AccumulatorTileIterator::TensorRef;
public:
CUTLASS_HOST_DEVICE
GemmSimt() { }
CUTLASS_DEVICE
void operator()(
ElementScalar alpha,
TensorRefA ref_A,
TensorRefB ref_B,
ElementScalar beta,
TensorRefC ref_C,
TensorRefC ref_D,
int lane_id) const {
// Instantiate iterators pointing to slices of the A and B matrices in shared memory
typename MmaWarp::IteratorA iter_A(ref_A, {Shape::kM, Shape::kK}, lane_id);
typename MmaWarp::IteratorB iter_B(ref_B, {Shape::kK, Shape::kN}, lane_id);
// Instantiate and clear accumulator tile holding the C matrix
typename MmaWarp::FragmentC accum;
accum.clear();
// Instantiate the warp-level matrix multiply operator
MmaWarp mma_op;
// Instantiate fragments holding the slice of the matrix held by each warp
typename MmaWarp::FragmentA frag_A[2];
typename MmaWarp::FragmentB frag_B[2];
// Load fragments from shared memory
iter_A.load(frag_A[0]);
iter_B.load(frag_B[0]);
++iter_A;
++iter_B;
// Mainloop: iterate over the K groups, double-buffering the operand fragments
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < kKgroups; ++k) {
// Load fragments from shared memory
iter_A.load(frag_A[(k + 1) % 2]);
iter_B.load(frag_B[(k + 1) % 2]);
++iter_A;
++iter_B;
// Compute the matrix multiply
mma_op(accum, frag_A[k % 2], frag_B[k % 2], accum);
}
// Instantiate iterators
FragmentIterator accum_frag_it(accum);
AccumulatorTileIterator source_tile_it(ref_C, {Shape::kM, Shape::kN}, lane_id);
AccumulatorTileIterator dest_tile_it(ref_D, {Shape::kM, Shape::kN}, lane_id);
// Define function objects for linear scaling operation
cutlass::multiplies<typename FragmentIterator::Fragment> mul_source;
cutlass::multiply_add<typename FragmentIterator::Fragment> mul_add_accumulator;
// Iterate over the epilogue components
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentIterator::kIterations; ++idx) {
// Define storage for slices of the accumulators
typename FragmentIterator::Fragment accum_fragment;
typename FragmentIterator::Fragment source_fragment;
// Select a slice of accumulators from the accumulator tile
accum_frag_it.load(accum_fragment);
++accum_frag_it;
// Load a corresponding slice from Shared memory
source_tile_it.load(source_fragment);
++source_tile_it;
// Compute linear scaling - alpha * AB + beta * C
source_fragment = mul_source(beta, source_fragment);
accum_fragment = mul_add_accumulator(alpha, accum_fragment, source_fragment);
// Store the result to shared memory
dest_tile_it.store(accum_fragment);
++dest_tile_it;
}
}
};
} // namespace warp
} // namespace gemm
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
// Sample kernel demonstrating a collective GEMM operation by a warp on arbitrary matrices held
// in Shared Memory.
__global__ void kernel(
float *D_gmem,
float alpha,
float const *A_gmem,
float const *B_gmem,
float beta,
float const *C_gmem) {
// Define several matrices in shared memory
__shared__ float A[kM][kK];
__shared__ float B[kN][kK];
__shared__ float C[kM][kN];
// Copy data into SMEM
if (threadIdx.x == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
for (int k = 0; k < kK; ++k) {
A[m][k] = A_gmem[m * kK + k];
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
for (int k = 0; k < kK; ++k) {
B[n][k] = B_gmem[n * kK + k];
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
C[m][n] = C_gmem[m * kN + n];
}
}
}
__syncthreads();
//
// Instantiate a warp-level matrix multiply operator given the overall problem shape,
// the data type of each operand, and the layout of each operand.
//
using GemmSimt = cutlass::gemm::warp::GemmSimt<
cutlass::gemm::GemmShape<kM, kN, kK>,
float, // Data type of A elements
cutlass::layout::RowMajor, // Layout of A matrix
float, // Data type of B elements
cutlass::layout::ColumnMajor, // Layout of B matrix
float, // Data type of C elements
cutlass::layout::RowMajor, // Layout of C matrix
float // Scalar type of alpha and beta
>;
// Instantiate the GEMM operator
GemmSimt gemm;
// Execute the warp-level GEMM operation
gemm(
alpha,
{&A[0][0], kK},
{&B[0][0], kK},
beta,
{&C[0][0], kN},
{&C[0][0], kN},
threadIdx.x);
__syncthreads();
// Copy results from SMEM back to global memory
if (threadIdx.x == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
D_gmem[m * kN + n] = C[m][n];
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, const char *arg[]) {
cutlass::HostTensor<float, cutlass::layout::RowMajor> A({kM, kK});
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> B({kK, kN});
cutlass::HostTensor<float, cutlass::layout::RowMajor> C({kM, kN});
cutlass::HostTensor<float, cutlass::layout::RowMajor> D({kM, kN});
uint64_t seed = 2020;
float max = 8;
float min = -8;
std::cout << "Simt canonical GEMM problem size = (" << cutlass::gemm::GemmShape<kM, kN, kK>() <<")" << std::endl;
cutlass::reference::host::TensorFillRandomUniform(
A.host_view(),
seed,
max,
min,
0
);
cutlass::reference::host::TensorFillRandomUniform(
B.host_view(),
seed + 17,
max,
min,
0
);
#if 0 // Debug: fill A sequentially and B as Identity matrix for debugging
cutlass::reference::host::BlockFillSequential(
A.host_view().data(), A.host_view().capacity());
cutlass::reference::host::TensorFillIdentity(B.host_view());
#endif
cutlass::reference::host::TensorFillRandomUniform(
C.host_view(),
seed + 31,
max,
min,
0
);
A.sync_device();
B.sync_device();
C.sync_device();
D.sync_device();
dim3 grid(1, 1);
dim3 block(32, 1, 1);
float alpha = 1.0f;
float beta = 0.0f;
kernel<<< grid, block >>>(
D.device_data(),
alpha,
A.device_data(),
B.device_data(),
beta,
C.device_data()
);
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Failed to synchronize device after kernel launch." << std::endl;
return -1;
}
D.sync_host();
// Compute reference on host
cutlass::HostTensor<float, cutlass::layout::RowMajor> D_ref({kM, kN}, false);
cutlass::reference::host::TensorCopy(D_ref.host_view(), C.host_view());
cutlass::reference::host::Gemm<
float, cutlass::layout::RowMajor,
float, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
float, float> reference_gemm;
reference_gemm(
{kM, kN, kK},
alpha,
A.host_ref(),
B.host_ref(),
beta,
D_ref.host_ref(),
float()
);
// Verify reference matches computed
if (!cutlass::reference::host::TensorEquals(
D.host_view(),
D_ref.host_view())) {
std::cerr
<< "A =\n" << A.host_view()
<< "\n\nB = \n" << B.host_view()
<< "\n\nC = " << C.host_view()
<< "\n\nRef =\n" << D_ref.host_view()
<< "\n\nD =\n" << D.host_view() << "\n\n";
std::cerr << "Error - device results mismatch host reference." << std::endl;
return -1;
}
std::cout << "Passed" << std::endl;
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| examples/20_simt_canonical/simt_canonical.cu/0 | {
"file_path": "examples/20_simt_canonical/simt_canonical.cu",
"repo_id": "examples",
"token_count": 4815
} | 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief GEMM kernel to support the epilogue visitor model
for customized softmax partial reduction epilogue fusion.
This source file will likely be moved to `include/cutlass/gemm/kernel/` in the future once
its usage has been stabilized. For now, it is included in this example to demonstrate
some basic output fusion options.
*/
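//
// A rough usage sketch (illustrative only; see the rest of this example for how the
// kernel is actually composed and launched):
//
//   using GemmKernel = cutlass::gemm::kernel::GemmWithEpilogueVisitor<
//       Mma, Epilogue, ThreadblockSwizzle>;     // collective pieces built elsewhere
//
//   typename GemmKernel::Arguments args(...);   // problem size, tensor refs, visitor args
//   typename GemmKernel::Params params(args);   // host-side precomputation
//
//   // launched with one threadblock per tile of params.grid_tiled_shape and
//   // sizeof(typename GemmKernel::SharedStorage) bytes of shared memory
//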
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmWithEpilogueVisitor {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueVisitor = typename Epilogue::Visitor;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using TensorRefA = TensorRef<ElementA, LayoutA>;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using TensorRefB = TensorRef<ElementB, LayoutB>;
using ElementC = typename EpilogueVisitor::ElementOutput;
using LayoutC = typename Epilogue::Layout;
using TensorRefC = TensorRef<ElementC, LayoutC>;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
using ElementNorm = typename EpilogueVisitor::ElementNorm;
using ElementSum = typename EpilogueVisitor::ElementSum;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = EpilogueVisitor::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value
);
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
TensorRefA ref_A;
TensorRefB ref_B;
TensorRefC ref_C;
TensorRefC ref_D;
ElementNorm *ptr_Max;
ElementSum *ptr_Sum;
int64_t batch_stride_A;
int64_t batch_stride_B;
typename EpilogueVisitor::Arguments epilogue_visitor;
//
// Methods
//
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1)
{ }
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode_,
GemmCoord problem_size_,
int batch_count_,
TensorRefA ref_A_,
TensorRefB ref_B_,
TensorRefC ref_C_,
TensorRefC ref_D_,
ElementNorm *ptr_Max_,
ElementSum *ptr_Sum_,
int64_t batch_stride_A_,
int64_t batch_stride_B_,
typename EpilogueVisitor::Arguments epilogue_visitor_
):
mode(mode_),
problem_size(problem_size_),
batch_count(batch_count_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
ptr_Max(ptr_Max_),
ptr_Sum(ptr_Sum_),
batch_stride_A(batch_stride_A_),
batch_stride_B(batch_stride_B_),
epilogue_visitor(epilogue_visitor_)
{
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename EpilogueVisitor::OutputTileIterator::Params params_C;
typename EpilogueVisitor::OutputTileIterator::Params params_D;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
void * ptr_A;
void * ptr_B;
ElementC * ptr_C;
ElementC * ptr_D;
ElementNorm * ptr_Max;
ElementSum * ptr_Sum;
int64_t batch_stride_A;
int64_t batch_stride_B;
typename EpilogueVisitor::Params epilogue_visitor;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
swizzle_log_tile(0),
params_A(0),
params_B(0),
params_C(0),
params_D(0),
batch_count(0),
gemm_k_size(0),
mode(cutlass::gemm::GemmUniversalMode::kGemm),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
ptr_Max(nullptr),
ptr_Sum(nullptr),
batch_stride_A(0),
batch_stride_B(0)
{ }
Params(
Arguments const &args
):
problem_size(args.problem_size),
swizzle_log_tile(0),
params_A(args.ref_A.layout()),
params_B(args.ref_B.layout()),
params_C(args.ref_C.layout()),
params_D(args.ref_D.layout()),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(args.problem_size.k()),
ptr_A(args.ref_A.data()),
ptr_B(args.ref_B.data()),
ptr_C(args.ref_C.data()),
ptr_D(args.ref_D.data()),
ptr_Max(args.ptr_Max),
ptr_Sum(args.ptr_Sum),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
epilogue_visitor(args.epilogue_visitor)
{
ThreadblockSwizzle threadblock_swizzle;
grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (args.mode == GemmUniversalMode::kGemm || args.mode == GemmUniversalMode::kGemmSplitKParallel) {
int const kAlignK = const_max(const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value), 1);
gemm_k_size = round_up(ceil_div(args.problem_size.k(), args.batch_count), kAlignK);
if (gemm_k_size) {
grid_tiled_shape.k() = ceil_div(args.problem_size.k(), gemm_k_size);
}
}
swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape);
}
};
/// Shared memory storage structure
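// The mainloop and the epilogue never use shared memory at the same time, so their
// storage is overlapped in a union to reduce the kernel's shared memory footprint.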
union SharedStorage {
typename Mma::SharedStorage main_loop;
struct {
typename Epilogue::SharedStorage epilogue;
typename EpilogueVisitor::SharedStorage visitor;
} epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
GemmWithEpilogueVisitor() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmWithEpilogueVisitor::can_implement()");
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
#define SPLIT_K_ENABLED 1
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
#if SPLIT_K_ENABLED
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
#endif
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
//
// Construct the epilogue visitor
//
EpilogueVisitor epilogue_visitor(
params.epilogue_visitor,
shared_storage.epilogue.visitor,
params.problem_size.mn(),
thread_idx,
warp_idx,
lane_idx,
params.params_C,
params.params_D,
params.ptr_C,
params.ptr_D,
params.ptr_Max,
params.ptr_Sum,
threadblock_offset,
blockIdx.y * params.problem_size.m());
if (params.mode == GemmUniversalMode::kGemm) {
// Indicate which position in a serial reduction the output operator is currently updating
epilogue_visitor.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
else if (params.mode == GemmUniversalMode::kBatched || params.mode == GemmUniversalMode::kArray) {
epilogue_visitor.set_batch_index(threadblock_tile_offset.k());
}
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(epilogue_visitor, accumulators);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/35_gemm_softmax/gemm_with_epilogue_visitor.h/0 | {
"file_path": "examples/35_gemm_softmax/gemm_with_epilogue_visitor.h",
"repo_id": "examples",
"token_count": 6415
} | 9 |
# Customizable Python Interface Examples
This directory contains examples of using the CUTLASS Python interface with a variety of configurations for kernels.
For all the tests, add `--print_cuda` to print the underlying CUDA kernel. Use `-h` or `--help` to display the help message.
## GEMM Examples
The GEMM examples use numpy to create input tensors and verify the results.
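Conceptually, the check each example performs boils down to the following (a minimal sketch; the names are placeholders, not `gemm.py`'s internals):
```python
import numpy as np

M, N, K = 512, 256, 128             # -p 512 256 128
alpha, beta = 1.0, 0.5              # -alpha 1.0 -beta 0.5
A = np.random.rand(M, K)
B = np.random.rand(K, N)
C = np.random.rand(M, N)
D_ref = alpha * (A @ B) + beta * C  # compared against the device result, e.g. with np.allclose
```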
### GEMM F64 Example
Example 1: SM80_Device_Gemm_f64t_f64n_f64n_tensor_op_f64_32x32x16_16x16x16
```python
python gemm.py -i 8 8 4 -ta float64 -tb float64 -tc float64 -tacc float64 -m multiply_add -op TensorOp -b 32 32 16 -s 4 -w 2 2 1 -cc 80 -la ColumnMajor -aa 1 -lb RowMajor -ab 1 -lc RowMajor -ac 1 -te float64 -ep LinearCombination -sw IdentitySwizzle1 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Gemm -k 1
```
Example 2: SM80_Device_Gemm_f64n_f64t_f64n_tensor_op_f64_64x64x16_32x32x16, split_k(2)_serial
```python
python gemm.py -i 8 8 4 -ta float64 -tb float64 -tc float64 -tacc float64 -m multiply_add -op TensorOp -b 64 64 16 -s 4 -w 2 2 1 -cc 80 -la RowMajor -aa 1 -lb ColumnMajor -ab 1 -lc RowMajor -ac 1 -te float64 -ep LinearCombination -sw IdentitySwizzle1 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Gemm -k 2
```
### GEMM F32 Example
Example 1: SM80_Device_Gemm_f32n_f32t_f32n_tensor_op_bf16_f32_128x128x32_64x64x32
```python
python gemm.py -i 16 8 8 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add_fast_bf16 -op TensorOp -b 128 128 32 -s 3 -w 2 2 1 -cc 80 -la RowMajor -aa 4 -lb ColumnMajor -ab 4 -lc RowMajor -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Gemm -k 1
```
Example 2: SM80_Device_Gemm_f32t_f32t_f32n_tensor_op_f32_128x128x32_64x64x32, split_k(2)_parallel
```python
python gemm.py -i 16 8 8 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 32 -s 3 -w 2 2 1 -cc 80 -la ColumnMajor -aa 4 -lb ColumnMajor -ab 4 -lc RowMajor -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm GemmSplitKParallel -k 2
```
Example 3: SM80_Device_Gemm_f32t_f32t_f32n_tensor_op_fast_accurate_f32_64x64x32_32x32x32, split_k(4)_serial
```python
python gemm.py -i 16 8 8 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add_fast_f32 -op TensorOp -b 64 64 32 -s 3 -w 2 2 1 -cc 80 -la ColumnMajor -aa 4 -lb ColumnMajor -ab 4 -lc RowMajor -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Gemm -k 4
```
### GEMM F16 Example
Example 1: SM80_Device_Gemm_f32t_f32n_f32t_tensor_op_bf16_f32_128x128x32_64x64x32
```python
python gemm.py -i 16 8 16 -ta float16 -tb float16 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 32 -s 3 -w 2 2 1 -cc 80 -la ColumnMajor -aa 8 -lb RowMajor -ab 8 -lc ColumnMajor -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle4 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Gemm -k 1
```
Example 2: SM80_Device_Gemm_f16t_f16t_f16n_tensor_op_f32_128x128x64_64x64x64, split_k(2)_serial
```python
python gemm.py -i 16 8 16 -ta float16 -tb float16 -tc float16 -tacc float32 -m multiply_add -op TensorOp -b 128 128 64 -s 3 -w 2 2 1 -cc 80 -la ColumnMajor -aa 8 -lb ColumnMajor -ab 8 -lc RowMajor -ac 8 -te float32 -ep LinearCombination -sw IdentitySwizzle2 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Gemm -k 2
```
Example 3: SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32_256x128x64_64x64x64, split_k(3)_serial
```python
python gemm.py -i 16 8 16 -ta float16 -tb float16 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 256 128 64 -s 3 -w 4 2 1 -cc 80 -la ColumnMajor -aa 8 -lb ColumnMajor -ab 8 -lc RowMajor -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm GemmSplitKParallel -k 3
```
### GEMM BF16 Example
Example 1: Device_Gemm_bf16t_bf16t_f32n_tensor_op_f32_64x128x64_32x64x64, split_k(5)_parallel
```python
python gemm.py -i 16 8 16 -ta bfloat16 -tb bfloat16 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 64 128 64 -s 3 -w 2 2 1 -cc 80 -la ColumnMajor -aa 8 -lb ColumnMajor -ab 8 -lc RowMajor -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle2 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm GemmSplitKParallel -k 5
```
### GEMM Int8 Example
Example 1: SM80_Device_Gemm_s8n_s8t_s8n_tensor_op_s32_256x128x128_64x64x128
```python
python gemm.py -i 16 8 32 -ta int8 -tb int8 -tc int8 -tacc int32 -m multiply_add -op TensorOp -b 128 128 128 -s 3 -w 2 2 1 -cc 80 -la RowMajor -aa 16 -lb ColumnMajor -ab 16 -lc RowMajor -ac 16 -te float32 -ep FastLinearCombinationClamp -sw IdentitySwizzle2 -p 512 512 512 -alpha 1.0 -beta 0.0 -gm Gemm -k 1
```
### Batched & Array GEMM
Example 1: Batched GEMM
```python
python gemm.py -i 16 8 8 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add_fast_bf16 -op TensorOp -b 128 128 32 -s 3 -w 2 2 1 -cc 80 -la RowMajor -aa 4 -lb ColumnMajor -ab 4 -lc RowMajor -ac 4 -te float32 -ep LinearCombination -sw BatchedIdentitySwizzle -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Batched -k 1 -batch 3
```
Example 2: Array GEMM
```python
python gemm.py -i 16 8 16 -ta float16 -tb float16 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 32 -s 3 -w 2 2 1 -cc 80 -la ColumnMajor -aa 8 -lb RowMajor -ab 8 -lc ColumnMajor -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle4 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Array -k 1 -batch 2
```
***
## GEMM Grouped Examples
The GEMM Grouped examples use numpy to create input tensors and verify the results.
Example 1: SM80_Device_GemmGrouped_f16t_f16t_f32t_tensor_op_f32_128x128x32_64x64x32, device schedule
```python
python gemm_grouped.py -i 16 8 16 -ta float16 -tb float16 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 32 -s 3 -w 2 2 1 -cc 80 -la ColumnMajor -aa 8 -lb ColumnMajor -ab 8 -lc ColumnMajor -ac 4 -te float32 -ep LinearCombination -p ./grouped_gemm_problem_size.csv -alpha 1.0 -beta 0.0 -pm Device
```
Example 2: SM80_Device_GemmGrouped_f64n_f64n_f64t_tensor_op_f64_64x64x16_32x32x16, host schedule
```python
python gemm_grouped.py -i 8 8 4 -ta float64 -tb float64 -tc float64 -tacc float64 -m multiply_add -op TensorOp -b 64 64 16 -s 4 -w 2 2 1 -cc 80 -la RowMajor -aa 1 -lb RowMajor -ab 1 -lc ColumnMajor -ac 1 -te float64 -ep LinearCombination -p ./grouped_gemm_problem_size.csv -alpha 1.0 -beta 1.0 -pm Host
```
Example 3: SM80_Device_GemmGrouped_f32n_f32n_f32n_simt_f32_128x64x8_64x32x1, device schedule
```python
python gemm_grouped.py -i 1 1 1 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add -op Simt -b 128 64 8 -s 4 -w 2 2 1 -cc 80 -la RowMajor -aa 1 -lb RowMajor -ab 1 -lc RowMajor -ac 1 -te float32 -ep LinearCombination -p ./grouped_gemm_problem_size.csv -alpha 2.0 -beta 1.0 -pm Device
```
Example 4: SM80_Device_GemmGrouped_f16t_f16t_f32t_tensor_op_f32_128x128x32_64x64x32, device schedule
```python
python gemm_grouped.py -i 16 8 16 -ta float16 -tb float16 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 32 -s 3 -w 2 2 1 -cc 80 -la ColumnMajor -aa 8 -lb ColumnMajor -ab 8 -lc ColumnMajor -ac 4 -te float32 -ep LinearCombination -p ./grouped_gemm_problem_size.csv -alpha 2.0 -beta 1.0 -pm Device
```
***
## Conv2d Example
The Conv2d examples use PyTorch to create input tensors and verify the results. PyTorch can be installed by following the instructions on the [official website](https://pytorch.org/get-started/locally/).
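As a rough sketch of what the fprop verification looks like (tensor names and the NHWC/KRSC layout handling below are assumptions, not `conv2d.py`'s API):
```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 13, 17, 8)            # N, H, W, C   (as passed via -nhwc 1 13 17 8)
w = torch.randn(24, 3, 3, 8)             # K, R, S, C   (as passed via -krsc 24 3 3 8)
y_ref = F.conv2d(x.permute(0, 3, 1, 2),  # torch expects NCHW activations
                 w.permute(0, 3, 1, 2),  # ... and KCRS filters
                 stride=(2, 2), padding=0)
y_ref = y_ref.permute(0, 2, 3, 1)        # back to NHWC for comparison with the CUTLASS output
```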
### Conv2d F32 Fprop
Example 1: SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32
```python
python conv2d.py -i 16 8 8 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 16 -s 3 -w 2 2 1 -cc 80 -la TensorNHWC -aa 4 -lb TensorNHWC -ab 4 -lc TensorNHWC -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -co fprop -st Strided -ia optimized -sm Serial -k 1 -nhwc 1 13 17 8 -krsc 24 3 3 8 -pad 0 0 0 0 -stride 2 2 -dilation 1 1 -alpha 1.0 -beta 0.0
```
Example 2: SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align2
```python
python conv2d.py -i 16 8 8 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 16 -s 3 -w 2 2 1 -cc 80 -la TensorNHWC -aa 2 -lb TensorNHWC -ab 2 -lc TensorNHWC -ac 2 -te float32 -ep LinearCombination -sw IdentitySwizzle2 -co fprop -st Strided -ia optimized -sm Serial -k 2 -nhwc 1 4 4 12 -krsc 8 3 3 12 -pad 0 0 0 0 -stride 3 3 -dilation 1 1 -alpha 1.0 -beta 1.0
```
Example 3: SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32
```python
python conv2d.py -i 1 1 1 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add -op Simt -b 128 128 8 -s 4 -w 4 2 1 -cc 80 -la TensorNHWC -aa 4 -lb TensorNHWC -ab 4 -lc TensorNHWC -ac 1 -te float32 -ep LinearCombination -sw IdentitySwizzle4 -co fprop -st Strided -ia analytic -sm Parallel -k 3 -nhwc 1 71 80 32 -krsc 64 5 5 32 -pad 2 2 2 2 -stride 2 2 -dilation 1 1 -alpha 1.0 -beta 1.0
```
### Conv2d F32 Wgrad
Example 1: Device_Conv2d_Wgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align1
```python
python conv2d.py -i 16 8 8 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 32 -s 3 -w 2 2 1 -cc 80 -la TensorNHWC -aa 1 -lb TensorNHWC -ab 1 -lc TensorNHWC -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -co wgrad -st Strided -ia optimized -sm Serial -k 1 -nhwc 1 8 8 1 -krsc 1 3 3 1 -pad 1 1 1 1 -stride 1 1 -dilation 1 1 -alpha 1.0 -beta 0.0
```
Example 2: Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32
```python
python conv2d.py -i 1 1 1 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add -op Simt -b 128 128 8 -s 4 -w 2 4 1 -cc 80 -la TensorNHWC -aa 4 -lb TensorNHWC -ab 4 -lc TensorNHWC -ac 1 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -co wgrad -st Strided -ia optimized -sm Serial -k 2 -nhwc 1 27 27 256 -krsc 512 3 3 256 -pad 1 1 1 1 -stride 2 1 -dilation 1 1 -alpha 1.0 -beta 0.0
```
### Conv2d F32 Dgrad
Example 1: Device_Conv2d_Dgrad_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32
```python
python conv2d.py -i 16 8 8 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 16 -s 3 -w 2 2 1 -cc 80 -la TensorNHWC -aa 4 -lb TensorNHWC -ab 4 -lc TensorNHWC -ac 4 -te float32 -ep LinearCombination -sw StridedDgradIdentitySwizzle1 -co dgrad -st Strided -ia optimized -sm Serial -k 2 -nhwc 1 27 27 256 -krsc 512 3 3 256 -pad 1 1 1 1 -stride 2 1 -dilation 1 1 -alpha 1.0 -beta 0.0
```
### Conv2d F16 Fprop
Example 1: SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32
```python
python conv2d.py -i 16 8 16 -ta float16 -tb float16 -tc float16 -tacc float32 -m multiply_add -op TensorOp -b 128 128 64 -s 3 -w 2 2 1 -cc 80 -la TensorNHWC -aa 8 -lb TensorNHWC -ab 8 -lc TensorNHWC -ac 8 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -co fprop -st Strided -ia optimized -sm Serial -k 1 -nhwc 1 27 27 256 -krsc 512 3 3 256 -pad 1 1 1 1 -stride 2 1 -dilation 1 1 -alpha 1.0 -beta 0.0
```
Example 2: SM80_Device_Conv2d_Fprop_Few_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_2
```python
python conv2d.py -i 16 8 16 -ta float16 -tb float16 -tc float16 -tacc float32 -m multiply_add -op TensorOp -b 128 128 64 -s 3 -w 2 2 1 -cc 80 -la TensorNHWC -aa 2 -lb TensorNHWC -ab 2 -lc TensorNHWC -ac 8 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -co fprop -st Strided -ia few_channels -sm Serial -k 1 -nhwc 1 16 16 2 -krsc 16 3 3 2 -pad 1 1 1 1 -stride 2 2 -dilation 1 1 -alpha 1.0 -beta 0.0
```
Example 3: SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_8
```python
python conv2d.py -i 16 8 16 -ta float16 -tb float16 -tc float16 -tacc float32 -m multiply_add -op TensorOp -b 128 128 64 -s 3 -w 2 2 1 -cc 80 -la TensorNHWC -aa 8 -lb TensorNHWC -ab 8 -lc TensorNHWC -ac 8 -te float32 -ep LinearCombination -sw IdentitySwizzle2 -co fprop -st Strided -ia fixed_channels -sm Serial -k 1 -nhwc 1 8 8 8 -krsc 16 3 3 8 -pad 1 1 1 1 -stride 2 2 -dilation 1 1 -alpha 1.0 -beta 0.0
```
Example 4: SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32_align4
```python
python conv2d.py -i 16 8 16 -ta float16 -tb float16 -tc float16 -tacc float32 -m multiply_add -op TensorOp -b 128 128 32 -s 3 -w 2 2 1 -cc 80 -la TensorNHWC -aa 4 -lb TensorNHWC -ab 4 -lc TensorNHWC -ac 4 -te float32 -ep LinearCombination -sw StridedDgradIdentitySwizzle1 -co dgrad -st Strided -ia optimized -sm Serial -k 1 -nhwc 1 56 56 12 -krsc 8 1 1 12 -pad 0 0 0 0 -stride 2 2 -dilation 1 1 -alpha 1.0 -beta 0.0
```
## Epilogue
### Bias
To replace C with a bias vector, add the `-bias` flag.
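For example, the first F64 GEMM example above becomes:
```python
python gemm.py -i 8 8 4 -ta float64 -tb float64 -tc float64 -tacc float64 -m multiply_add -op TensorOp -b 32 32 16 -s 4 -w 2 2 1 -cc 80 -la ColumnMajor -aa 1 -lb RowMajor -ab 1 -lc RowMajor -ac 1 -te float64 -ep LinearCombination -sw IdentitySwizzle1 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Gemm -k 1 -bias
```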
### Activation function
Example 1: ReLU
```python
python gemm.py -i 8 8 4 -ta float64 -tb float64 -tc float64 -tacc float64 -m multiply_add -op TensorOp -b 32 32 16 -s 4 -w 2 2 1 -cc 80 -la ColumnMajor -aa 1 -lb RowMajor -ab 1 -lc RowMajor -ac 1 -te float64 -ep LinearCombination -sw IdentitySwizzle1 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Gemm -k 1 -bias -activ relu
```
Example 2: leaky ReLU
```python
python gemm.py -i 8 8 4 -ta float64 -tb float64 -tc float64 -tacc float64 -m multiply_add -op TensorOp -b 64 64 16 -s 4 -w 2 2 1 -cc 80 -la RowMajor -aa 1 -lb ColumnMajor -ab 1 -lc RowMajor -ac 1 -te float64 -ep LinearCombination -sw IdentitySwizzle1 -p 512 256 128 -alpha 1.0 -beta 0.5 -gm Gemm -k 2 -bias -activ leaky_relu -activ_arg 0.2
```
Example 3: tanh (alpha=0 to avoid saturation)
```python
python gemm.py -i 16 8 8 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 32 -s 3 -w 2 2 1 -cc 80 -la ColumnMajor -aa 4 -lb ColumnMajor -ab 4 -lc RowMajor -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -p 512 256 128 -alpha 0.0 -beta 0.5 -gm GemmSplitKParallel -k 2 -bias -activ tanh
```
Example 4: sigmoid
```python
python gemm_grouped.py -i 8 8 4 -ta float64 -tb float64 -tc float64 -tacc float64 -m multiply_add -op TensorOp -b 64 64 16 -s 4 -w 2 2 1 -cc 80 -la RowMajor -aa 1 -lb RowMajor -ab 1 -lc ColumnMajor -ac 1 -te float64 -ep LinearCombination -p ./grouped_gemm_problem_size.csv -alpha 0.0 -beta 0.5 -pm Host -bias -activ sigmoid
```
Example 5: SiLU
```python
python conv2d.py -i 16 8 8 -ta float32 -tb float32 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 128 128 16 -s 3 -w 2 2 1 -cc 80 -la TensorNHWC -aa 2 -lb TensorNHWC -ab 2 -lc TensorNHWC -ac 2 -te float32 -ep LinearCombination -sw IdentitySwizzle2 -co fprop -st Strided -ia optimized -sm Serial -k 2 -nhwc 1 4 4 12 -krsc 8 3 3 12 -pad 0 0 0 0 -stride 3 3 -dilation 1 1 -alpha 0.0 -beta 0.5 -bias -activ silu
```
Example 6: HardSwish
```python
python conv2d.py -i 16 8 16 -ta float16 -tb float16 -tc float16 -tacc float32 -m multiply_add -op TensorOp -b 128 128 64 -s 3 -w 2 2 1 -cc 80 -la TensorNHWC -aa 2 -lb TensorNHWC -ab 2 -lc TensorNHWC -ac 8 -te float32 -ep LinearCombination -sw IdentitySwizzle1 -co fprop -st Strided -ia few_channels -sm Serial -k 1 -nhwc 1 16 16 2 -krsc 16 3 3 2 -pad 1 1 1 1 -stride 2 2 -dilation 1 1 -alpha 0.0 -beta 0.5 -bias -activ hardswish
```
Example 7: GELU
```python
python gemm.py -i 16 8 16 -ta bfloat16 -tb bfloat16 -tc float32 -tacc float32 -m multiply_add -op TensorOp -b 64 128 64 -s 3 -w 2 2 1 -cc 80 -la ColumnMajor -aa 8 -lb ColumnMajor -ab 8 -lc RowMajor -ac 4 -te float32 -ep LinearCombination -sw IdentitySwizzle2 -p 512 256 128 -alpha 0.0 -beta 0.5 -gm GemmSplitKParallel -k 5 -bias -activ gelu
```
| examples/40_cutlass_py/customizable/README.md/0 | {
"file_path": "examples/40_cutlass_py/customizable/README.md",
"repo_id": "examples",
"token_count": 6237
} | 10 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <vector>
#include <iostream>
#include <fstream>
#include "kernel_backward.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/host_tensor.h"
using Arch = cutlass::arch::Sm80;
static constexpr int kMaxK = 128;
template <typename ArchTag, typename Element, int kMaxK>
struct DefaultKernel {
// Some heuristics to select the best kernel (tested on Sm60, Sm70, Sm80)
// NOTE: Requires quite a lot of shmem for Sm80+,
// so might require tweaking those manually for Sm86/Sm89
static constexpr bool kSupports64x128 =
ArchTag::kMinComputeCapability >= 80 ||
(ArchTag::kMinComputeCapability >= 70 &&
cutlass::sizeof_bits<Element>::value <= 16);
static constexpr int kBlockSizeI = kSupports64x128 && kMaxK > 64 ? 128 : 64;
static constexpr bool kIsHalf = cutlass::sizeof_bits<Element>::value <= 16;
static constexpr bool kOutputInRF = kIsHalf && kMaxK <= kBlockSizeI;
static constexpr bool kPreload = kIsHalf && ArchTag::kMinComputeCapability >= 80 && kOutputInRF;
static constexpr int kBlockSizeJ = kPreload && kMaxK > 64 ? 128 : 64;
using Kernel = AttentionBackwardKernel<
Arch,
Element,
true, // kIsAligned_
false, // kApplyDropout_
kPreload, // kPreload_
kBlockSizeI, // kBlockSizeI_,
kBlockSizeJ, // kBlockSizeJ_,
kMaxK, // kMaxK
false, // kKeysQueriesAlignedToBlockSize
true // kEnableSplitKeys
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace {
template <typename T> struct TypeName;
template <> struct TypeName<float> { static constexpr const char* Name = "f32"; };
template <> struct TypeName<cutlass::half_t> { static constexpr const char* Name = "f16"; };
template <> struct TypeName<cutlass::bfloat16_t> { static constexpr const char* Name = "b16"; };
void readExpect(std::string const& expected) {
std::string read;
std::cin >> read;
if (read != expected) {
std::cerr << "FATAL: Read '" << read << "' but expected '" << expected << "'" << std::endl;
std::exit(1);
}
}
/// Helpers to read from stdin
template <typename Element>
cutlass::HostTensor<Element, cutlass::layout::RowMajor> readTensorOnDevice(std::string const& expectedName) {
readExpect("tensor_begin");
readExpect(std::string(TypeName<Element>::Name) + ":" + expectedName);
uint64_t len = 0;
std::cin >> len;
readExpect("file");
std::string filename;
std::cin >> filename;
cutlass::HostTensor<Element, cutlass::layout::RowMajor> tensor({int64_t(1), int64_t(len / sizeof(Element))});
uint8_t* data = (uint8_t*)tensor.host_data();
std::fstream myFile(filename, std::ios::in | std::ios::binary );
myFile.read((char*)data, len);
readExpect("tensor_end");
tensor.sync_device();
return tensor;
}
int64_t readInt64(std::string const& expectedName) {
readExpect(expectedName);
int64_t s = 0;
std::cin >> s;
return s;
}
float readFloat(std::string const& expectedName) {
readExpect(expectedName);
float s = 0;
std::cin >> s;
return s;
}
// Writing
template <typename Element>
void writeTensor(std::string const& name, cutlass::HostTensor<Element, cutlass::layout::RowMajor>& tensor) {
tensor.sync_host(); // device->host
size_t u8len = tensor.size() * sizeof(Element);
// Python is expected to provide a file name to write to
readExpect("tmpfile");
std::string tmpfile;
std::cin >> tmpfile;
uint8_t* data = (uint8_t*)tensor.host_data();
std::fstream myFile(tmpfile, std::ios::out | std::ios::binary );
myFile.write((char*)data, u8len);
myFile.close();
std::cout << "tensor_begin " << TypeName<Element>::Name << ":" << name << " ";
std::cout << u8len << " file " << tmpfile << " tensor_end" << std::endl;
}
void writeInt64(std::string const& name, int64_t value) {
std::cout << name << " " << value << std::endl;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
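// runKernel() consumes a simple token-based protocol on stdin (driven by a companion
// Python script): main() reads the dtype token first, the scalar parameters are then
// read in exactly the order of the readFloat()/READ_I64 calls below, and each tensor
// arrives as "tensor_begin <dtype>:<name> <bytes> file <path> tensor_end" with the raw
// bytes staged through a temporary file. Results are written back to stdout in the same
// style, with Python supplying the temporary file names for the output tensors.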
template <typename Element>
int runKernel() {
using Kernel = typename DefaultKernel<Arch, Element, kMaxK>::Kernel;
#define READ_I64(NAME) p.NAME = (decltype(p.NAME))readInt64(#NAME)
#define READ_TENSOR_AND_STRIDES_BMH(DT, NAME, NAME_XS) \
auto storage##NAME = readTensorOnDevice<DT>(#NAME); \
p.NAME##_ptr = storage##NAME.device_data(); \
READ_I64(NAME_XS##_strideB); \
READ_I64(NAME_XS##_strideM); \
READ_I64(NAME_XS##_strideH);
#define CUDA_CHECK(FN) { \
auto cudaError = FN; \
if (cudaError != cudaSuccess) { \
std::cerr << "FATAL: " #FN " failed: " << cudaGetErrorString(cudaError) << std::endl; \
return -1; \
} \
}
typename Kernel::Params p;
p.scale = readFloat("scale");
READ_I64(head_dim);
READ_I64(head_dim_value);
READ_I64(num_queries);
READ_I64(num_keys);
READ_I64(num_heads);
READ_I64(custom_mask_type);
READ_I64(num_batches);
int64_t repeat_count = readInt64("repeat_count");
READ_I64(num_splits_key);
READ_TENSOR_AND_STRIDES_BMH(Element, query, q);
READ_TENSOR_AND_STRIDES_BMH(Element, key, k);
READ_TENSOR_AND_STRIDES_BMH(Element, value, v);
auto lse = readTensorOnDevice<typename Kernel::lse_scalar_t>("logsumexp");
p.logsumexp_ptr = lse.device_data();
p.lse_strideB = readInt64("lse_strideB");
p.lse_strideH = readInt64("lse_strideH");
// output
auto stOutput = readTensorOnDevice<Element>("output");
p.output_ptr = stOutput.device_data();
READ_I64(o_strideB);
auto o_strideM = readInt64("o_strideM");
if (o_strideM != p.o_strideM()) {
std::cerr << "Invalid `o_strideM`: " << o_strideM << " - expected " << p.o_strideM();
return 2;
}
READ_I64(o_strideH);
READ_TENSOR_AND_STRIDES_BMH(Element, grad_output, gO);
auto stDelta = readTensorOnDevice<typename Kernel::accum_t>("delta");
p.delta_ptr = stDelta.device_data();
READ_I64(delta_strideB);
READ_I64(delta_strideH);
// Allocate workspace
if (p.workspace_size()) {
CUDA_CHECK(cudaMalloc(&p.workspace, p.workspace_size()));
}
// Allocate outputs in BMHK format
p.gQKV_strideM_multiplier = 1;
p.gQ_strideH = p.head_dim;
p.gQ_strideB = p.gQ_strideM() * p.num_queries;
p.gK_strideH = p.head_dim;
p.gK_strideB = p.gK_strideM() * p.num_keys;
p.gV_strideH = p.head_dim_value;
p.gV_strideB = p.gV_strideM() * p.num_keys;
cutlass::HostTensor<Element, cutlass::layout::RowMajor> gQ({int64_t(1), p.gQ_strideB * p.num_batches});
cutlass::HostTensor<Element, cutlass::layout::RowMajor> gK({int64_t(1), p.gK_strideB * p.num_batches});
cutlass::HostTensor<Element, cutlass::layout::RowMajor> gV({int64_t(1), p.gV_strideB * p.num_batches});
p.grad_query_ptr = gQ.device_data();
p.grad_key_ptr = gK.device_data();
p.grad_value_ptr = gV.device_data();
if (!Kernel::check_supported(p)) {
std::cerr << "FATAL: Kernel does not support these inputs" << std::endl;
return 2;
}
// Run kernel
cudaDeviceSynchronize();
auto kernel_fn = attention_kernel_backward_batched_impl<Kernel>;
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
CUDA_CHECK(cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, int(smem_bytes)));
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
// Write outputs
std::cout << "OK ";
writeTensor("grad_query", gQ);
writeInt64("gQ_strideB", p.gQ_strideB);
writeInt64("gQ_strideM", p.gQ_strideM());
writeInt64("gQ_strideH", p.gQ_strideH);
writeTensor("grad_key", gK);
writeInt64("gK_strideB", p.gK_strideB);
writeInt64("gK_strideM", p.gK_strideM());
writeInt64("gK_strideH", p.gK_strideH);
writeTensor("grad_value", gV);
writeInt64("gV_strideB", p.gV_strideB);
writeInt64("gV_strideM", p.gV_strideM());
writeInt64("gV_strideH", p.gV_strideH);
// Timing
cudaEvent_t events[2];
for (auto & event : events) {
CUDA_CHECK(cudaEventCreate(&event));
}
CUDA_CHECK(cudaEventRecord(events[0]));
for (int i = 0; i < repeat_count; ++i) {
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
}
CUDA_CHECK(cudaEventRecord(events[1]));
CUDA_CHECK(cudaEventSynchronize(events[1]));
// Measure elapsed runtime
float runtime_ms = 0;
CUDA_CHECK(cudaEventElapsedTime(&runtime_ms, events[0], events[1]));
std::cout << "runtime_ms " << runtime_ms / float(repeat_count) << std::endl;
return 0;
}
int main() {
std::ios_base::sync_with_stdio(false);
std::string dtype;
std::cin >> dtype;
std::cerr << "Running kernel with dtype: " << dtype << std::endl;
if (dtype == "f16") {
return runKernel<cutlass::half_t>();
} else if (dtype == "b16") {
return runKernel<cutlass::bfloat16_t>();
} else if (dtype == "f32") {
return runKernel<float>();
} else {
std::cerr << "FATAL: Unknown dtype: " << dtype << std::endl;
return 3;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/fused_multi_head_attention_backward.cu/0 | {
"file_path": "examples/41_fused_multi_head_attention/fused_multi_head_attention_backward.cu",
"repo_id": "examples",
"token_count": 4331
} | 11 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile
that participate in one warp-level store operation.
Typically, the accumulator tile is the largest single block of register-backed storage
within the kernel. Storing it to memory is best accomplished by partitioning it into
smaller tiles and storing these sequentially.
Round trips through shared memory during the Epilogue phase require partitioning, as
shared memory capacity is typically insufficient for a threadblock's total accumulator
size.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
///
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC, ///< matrix multiply operation data type (concept: data type)
typename OperatorFragmentC, ///< matrix multiply operation fragment (concept: Array)
typename Layout ///< target shared memory layout
>
class FusedBiasActFragmentIteratorTensorOp;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major shared memory
template <
typename WarpShape_, ///< shape of the warp-level GEMM tile
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC_, ///< matrix multiply operation data type (concept: data type)
typename OperatorFragmentC_ ///< matrix multiply operation fragment (concept: Array)
>
class FusedBiasActFragmentIteratorTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorElementC = OperatorElementC_;
using OperatorFragmentC = OperatorFragmentC_;
using Layout = layout::RowMajor;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
OperatorElementC,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
OperatorElementC,
OperatorFragmentC::kElements * Policy::OperatorCount::kRow * Policy::OperatorCount::kColumn>;
using OutputAccumulatorTile = AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
private:
/// Internal access type
using AccessType = Array<OperatorElementC, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FusedBiasActFragmentIteratorTensorOp(AccumulatorTile &accum):
accumulators_(reinterpret_cast<AccessType *>(&accum)),
index_(0) {
}
/// Increments
CUTLASS_HOST_DEVICE
FusedBiasActFragmentIteratorTensorOp &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FusedBiasActFragmentIteratorTensorOp &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
int index = index_ + index_offset;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int accumulator_access_offset =
index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess;
frag_ptr[n] = accumulators_[accumulator_access_offset];
}
}
/// Stores a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void store(Fragment &frag, int index_offset = 0) const {
int index = index_ + index_offset;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int accumulator_access_offset =
index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess;
accumulators_[accumulator_access_offset] = frag_ptr[n];
}
}
};
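////////////////////////////////////////////////////////////////////////////////

/// Illustrative sketch (not part of the original header): shows how a fragment
/// iterator of this kind is typically driven in an epilogue. `FragmentIterator`
/// and `TransformOp` are assumptions supplied by the caller; this is not the
/// definitive CUTLASS epilogue flow.
template <typename FragmentIterator, typename TransformOp>
CUTLASS_HOST_DEVICE
void visit_accumulator_fragments(
  typename FragmentIterator::AccumulatorTile &accum,  ///< warp-level accumulator tile
  TransformOp transform_op) {                         ///< e.g. fused bias + activation

  FragmentIterator frag_iterator(accum);

  CUTLASS_PRAGMA_UNROLL
  for (int iter = 0; iter < FragmentIterator::kIterations; ++iter, ++frag_iterator) {

    typename FragmentIterator::Fragment frag;

    frag_iterator.load(frag);       // gather one store-sized fragment

    frag = transform_op(frag);      // apply the elementwise transformation

    frag_iterator.store(frag);      // write the transformed fragment back in place
  }
}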
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/warp/fused_bias_act_fragment_iterator_tensor_op.h/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/warp/fused_bias_act_fragment_iterator_tensor_op.h",
"repo_id": "examples",
"token_count": 1960
} | 12 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#define TI(tag) \
cudaEvent_t _event_start_ ##tag; \
cudaEvent_t _event_end_ ##tag; \
float _event_time_ ##tag; \
cudaEventCreate(& _event_start_ ##tag); \
cudaEventCreate(& _event_end_ ##tag); \
cudaEventRecord(_event_start_ ##tag);
#define TO(tag, str, times) \
cudaEventRecord(_event_end_ ##tag); \
cudaEventSynchronize(_event_end_ ##tag); \
cudaEventElapsedTime(&_event_time_ ##tag, _event_start_ ##tag, _event_end_ ##tag); \
float _event_time_once_ ##tag = _event_time_ ##tag / times; \
printf("%20s:\t %10.3fus\t", str, _event_time_once_ ##tag * 1000); \
cudaDeviceSynchronize(); \
printf("%20s string: %s\n",str, cudaGetErrorString(cudaGetLastError()));
template<typename T>
struct memory_unit{
T* host_ptr;
T* device_ptr;
int size_bytes;
int elements;
void h2d(){
cudaMemcpy(device_ptr, host_ptr, size_bytes, cudaMemcpyHostToDevice);
}
void d2h(){
cudaMemcpy(host_ptr, device_ptr, size_bytes, cudaMemcpyDeviceToHost);
}
void free_all(){
free(host_ptr);
cudaFree(device_ptr);
}
memory_unit(int elements_): size_bytes(elements_ * sizeof(T)), elements(elements_){
host_ptr = (T*) malloc(elements_ * sizeof(T));
cudaMalloc((void**)&device_ptr, elements_ * sizeof(T));
}
void init(int abs_range = 1){
for(int i = 0; i < elements; i++){
host_ptr[i] = T(rand() % 100 / float(100) * 2 * abs_range - abs_range);
}
h2d();
}
};
template<typename T>
int check_result(T * a, T * b, int N){
int cnt = 0;
for(int i = 0; i < N; i ++){
float std = float(a[i]);
float my = float(b[i]);
if(abs(std - my) / abs(std) > 1e-2)
{
// printf("my: %f , std: %f\n", my, std);
cnt++;
}
}
printf("total err: %d / %d\n", cnt, N);
return cnt;
}
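// Illustrative sketch (not part of the original header): intended usage of
// memory_unit, the TI/TO timing macros, and check_result. The device work here is
// a placeholder device-to-device cudaMemcpy; a real caller would launch its kernel
// instead.
inline void example_timed_copy(){
  int const N = 1 << 20;
  memory_unit<float> ref(N);
  memory_unit<float> out(N);
  ref.init();                                    // random host data + h2d copy

  int const repeats = 10;
  TI(copy);
  for(int i = 0; i < repeats; i++){
    cudaMemcpy(out.device_ptr, ref.device_ptr, ref.size_bytes, cudaMemcpyDeviceToDevice);
  }
  TO(copy, "device copy", repeats);              // prints average time per iteration

  out.d2h();                                     // bring results back to host
  check_result(ref.host_ptr, out.host_ptr, N);   // element-wise relative-error check

  ref.free_all();
  out.free_all();
}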
| examples/44_multi_gemm_ir_and_codegen/utils.h/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/utils.h",
"repo_id": "examples",
"token_count": 1365
} | 13 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/***************************************************************************************************
Example contrasting the Stream-K parallel decomposition for GEMM threadblocks versus the
"classic data-parallel" and "Split-K" decompositions + residual add.
For more details regarding the Stream-K method, see "Stream-K: Work-centric Parallel Decomposition
for Dense Matrix-Matrix Multiplication on the GPU" (https://arxiv.org/abs/2301.03598)
Requires NVIDIA Ampere or newer device (SM80+).
- To lock persistence mode, power (400W), clocks (1005MHz) for evaluation (assumes device 0 and A100)
cutlass$ sudo nvidia-smi -pm 1 -i 0
cutlass$ sudo nvidia-smi -i 0 -pl 400
cutlass$ sudo nvidia-smi -i 0 -lgc 1005
- Build and run:
cutlass$ mkdir build
cutlass$ cd build
cutlass/build$ cmake .. -DCUTLASS_NVCC_ARCHS=80
cutlass/build$ make 47_ampere_gemm_universal_streamk_broadcast
cutlass/build$ ./examples/47_ampere_gemm_universal_streamk/47_ampere_gemm_universal_streamk_broadcast
- Reset clocks when done:
cutlass$ sudo nvidia-smi -rgc
**************************************************************************************************/
#include <iostream>
#include <string>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/gemm/device/gemm_universal_with_broadcast.h"
#include "cutlass/gemm/device/gemm_universal_streamk_with_broadcast.h"
#include "cutlass/epilogue/thread/linear_combination_residual_block.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_foreach.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/epilogue/threadblock/fusion/visitors.hpp"
#include "cutlass/gemm/kernel/default_gemm_universal_with_visitor.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM kernel configurations (cutlass_tensorop_h16816gemm_128x128_32x4_nn_align8)
/////////////////////////////////////////////////////////////////////////////////////////////////
// A matrix configuration
using ElementA = cutlass::half_t; // Element type for A matrix operand
using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand
constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes)
// B matrix configuration
using ElementB = cutlass::half_t; // Element type for B matrix operand
using LayoutB = cutlass::layout::RowMajor; // Layout type for B matrix operand
constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes)
// C1/C2/D matrix configuration
using ElementC = cutlass::half_t; // Element type for C matrix operands
using LayoutC = cutlass::layout::RowMajor; // Layout type for C matrix operands
constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrices in units of elements (up to 16 bytes)
// Output matrix configuration
using ElementOutput = cutlass::half_t; // Element type for output matrix operands
using LayoutOutput = cutlass::layout::RowMajor; // Layout type for output matrix operands
// constexpr int AlignmentOutput = 128 / cutlass::sizeof_bits<ElementOutput>::value; // Memory access granularity/alignment of output matrices in units of elements (up to 16 bytes)
// Multiply-accumulate blocking/pipelining details
using ElementAccumulator = cutlass::half_t; // Element type for internal accumulation
using ElementCompute = cutlass::half_t; // Element type for compute
using ArchTag = cutlass::arch::Sm80; // Tag indicating the minimum SM that supports the intended feature
using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock-level tile size (concept: GemmShape)
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp-level tile size (concept: GemmShape)
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // Instruction-level tile size (concept: GemmShape)
constexpr int NumStages = 4; // Number of global->shared pipeline stages used in the GEMM mainloop
constexpr int EVTEpilogueStages = 1; // Number of epilogue stages in EVT
// Residual block configuration
// Epilogue output operator
/// Using LinearCombinationResidualBlock
/// Models a residual block of the form: UnaryOp(BinaryOp(BinaryOp(ActivationOp(TensorOp(X) + bias), residual1), residual2))
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationResidualBlock<
ElementOutput, // Element type for output matrix
ElementAccumulator, // Element type from internal accumulation
ElementCompute, // Element type from internal accumulation
ElementC, // Element type for C1/C2/D matrix operands
AlignmentC, // Memory access granularity of C and D matrix in units of elements
cutlass::epilogue::thread::Identity, // Activation
cutlass::plus, // Binary operation 1
cutlass::epilogue::thread::Identity, // Unary operation
cutlass::plus // Binary operation 2
>;
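// Elementwise view (an illustrative reading that matches the host reference check
// performed later in this file; not additional code): with Identity chosen for the
// activation and unary ops and plus for both binary ops, each output element is
//
//   D[i][j] = (alpha * (A * B)[i][j] + bias[j]) + beta * C1[i][j] + C2[i][j]
//
// where `bias` is the broadcast vector and C1/C2 are the residual operands.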
// Reference device GEMM implementation type
using DeviceGemmReference = cutlass::reference::device::Gemm<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
ElementAccumulator>;
// Classic data-parallel device GEMM implementation type
using DeviceGemmBasic = cutlass::gemm::device::GemmUniversalWithBroadcast<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
NumStages,
AlignmentA,
AlignmentB>;
// StreamK device GEMM implementation type with EVT
using namespace cute;
using OutputTileThreadMap = cutlass::epilogue::threadblock::OutputTileThreadLayout<
ThreadblockShape,
WarpShape,
ElementC,
AlignmentC,
EVTEpilogueStages
>;
using Accum = cutlass::epilogue::threadblock::VisitorAccFetch;
using Bias = cutlass::epilogue::threadblock::VisitorRowBroadcast<
OutputTileThreadMap, ElementC,
cute::Stride<_0, _1, int32_t> // StrideMNL
>;
using C1 = cutlass::epilogue::threadblock::VisitorAuxLoad<
OutputTileThreadMap, ElementC,
cute::Stride<int64_t, _1, int64_t> // StrideMNL
>;
using C2 = cutlass::epilogue::threadblock::VisitorAuxLoad<
OutputTileThreadMap, ElementC,
cute::Stride<int64_t, _1, int64_t> // StrideMNL
>;
using Compute0 = cutlass::epilogue::threadblock::VisitorCompute<
cutlass::plus, ElementCompute, ElementCompute,
cutlass::FloatRoundStyle::round_to_nearest
>;
using EVTCompute0 = cutlass::epilogue::threadblock::Sm80EVT<
Compute0,
Accum,
Bias>;
using Compute1 = cutlass::epilogue::threadblock::VisitorCompute<
cutlass::plus, ElementCompute, ElementCompute,
cutlass::FloatRoundStyle::round_to_nearest
>;
using EVTCompute1 = cutlass::epilogue::threadblock::Sm80EVT<
Compute1,
EVTCompute0,
C1>;
using Compute2 = cutlass::epilogue::threadblock::VisitorCompute<
cutlass::plus, ElementOutput, ElementCompute,
cutlass::FloatRoundStyle::round_to_nearest
>;
using EVTCompute2 = cutlass::epilogue::threadblock::Sm80EVT<
Compute2,
EVTCompute1,
C2>;
using D = cutlass::epilogue::threadblock::VisitorAuxStore<
OutputTileThreadMap, ElementOutput, cutlass::FloatRoundStyle::round_to_nearest,
cute::Stride<int64_t, _1, int64_t> // StrideMNL
>;
using EVTD = cutlass::epilogue::threadblock::Sm80EVT<
D,
EVTCompute2>;
using EVTKernelStreamK =
typename cutlass::gemm::kernel::DefaultGemmWithVisitor<
ElementA, LayoutA, cutlass::ComplexTransform::kNone, AlignmentA,
ElementB, LayoutB, cutlass::ComplexTransform::kNone, AlignmentB,
ElementC, LayoutC, AlignmentC,
ElementAccumulator,
ElementCompute,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape,
WarpShape,
InstructionShape,
EVTD,
cutlass::gemm::threadblock::ThreadblockSwizzleStreamK,
NumStages,
cutlass::arch::OpMultiplyAdd,
EVTEpilogueStages
>::GemmKernel;
using DeviceGemmStreamK = cutlass::gemm::device::GemmUniversalAdapter<EVTKernelStreamK>;
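// Shape of the visitor tree defined above (an illustrative summary, not additional code):
//
//   D = Store( Compute2( Compute1( Compute0( Accum, Bias ), C1 ), C2 ) )
//
// i.e. the accumulator is summed with the broadcast bias row, then with the C1
// auxiliary load, then with C2, and the result is streamed out through D.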
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Testbed utility types
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result
{
double avg_runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
Result(
double avg_runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess)
:
avg_runtime_ms(avg_runtime_ms), gflops(gflops), status(status), error(error), passed(true)
{}
};
/// Command line options parsing
struct Options
{
std::string command_name;
bool help;
cutlass::gemm::GemmCoord problem_size;
float alpha;
float beta;
int split_k_factor;
int avail_sms;
int iterations;
bool real;
cutlass::HostTensor<ElementA, LayoutA> tensor_a;
cutlass::HostTensor<ElementB, LayoutB> tensor_b;
cutlass::HostTensor<ElementC, LayoutC> tensor_c1;
cutlass::HostTensor<ElementC, LayoutC> tensor_c2;
cutlass::HostTensor<ElementC, LayoutC> tensor_d;
cutlass::HostTensor<ElementC, LayoutC> tensor_ref_d;
cutlass::HostTensor<ElementC, LayoutC> tensor_Vector;
// cutlass::HostTensor<ElementC, LayoutC> tensor_Tensor;
Options(std::string command_name) :
command_name(command_name),
help(false),
problem_size({2048, 2048, 2048}),
alpha(1.0f),
beta(1.0f),
split_k_factor(1),
avail_sms(-1), // Number of device SMs to use is unlimited
iterations(10000),
real(false)
{}
bool valid() const
{
return true;
}
void parse(int argc, char const **args)
{
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("split", split_k_factor);
cmd.get_cmd_line_argument("iterations", iterations);
real = cmd.check_cmd_line_flag("real");
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const
{
out
<< "Performs a GEMM computation.\n"
<< "\n"
<< "Options:\n"
<< "\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --split=<int> Split-K factor to emulate\n\n"
<< " --real If specified, initializes with real values instead of whole numbers. Errors are to be expected.\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n";
out
<< "\n\nExamples:\n\n"
<< "$ " << command_name << " --m=1024 --n=512 --k=1024 --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const
{
// Two flops per multiply-add
return 2.0 * double(problem_size.product()) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM evaluation
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Populates a DeviceGemmBasic::Arguments structure from the given commandline options
typename DeviceGemmBasic::Arguments args_from_options(
const DeviceGemmBasic &device_gemm,
const Options &options,
cutlass::HostTensor<ElementA, LayoutA> &tensor_a,
cutlass::HostTensor<ElementB, LayoutB> &tensor_b,
cutlass::HostTensor<ElementC, LayoutC> &tensor_c1,
cutlass::HostTensor<ElementC, LayoutC> &tensor_c2,
cutlass::HostTensor<ElementC, LayoutC> &tensor_d,
cutlass::HostTensor<ElementC, LayoutC> &tensor_Vector /*,
cutlass::HostTensor<ElementC, LayoutC> &tensor_Tensor */
)
{
return typename DeviceGemmBasic::Arguments(
cutlass::gemm::GemmUniversalMode::kGemm, // universal mode
options.problem_size, // problem_size
options.split_k_factor, // batch count / splitk slices
{ // epilogue parameters
ElementAccumulator(options.alpha),
ElementAccumulator(options.beta)
},
tensor_a.device_data(), // ptr_A
tensor_b.device_data(), // ptr_B
tensor_c1.device_data(), // ptr_C1
tensor_c2.device_data(), // ptr_C2
tensor_d.device_data(), // ptr_D
tensor_Vector.device_data(), // ptr_Vector
/* tensor_Tensor.device_data(), */nullptr,// ptr_Tensor
options.problem_size.mk().product(), // batch_stride_A
options.problem_size.nk().product(), // batch_stride_B
options.problem_size.mn().product(), // batch_stride_C1
options.problem_size.mn().product(), // batch_stride_C2
options.problem_size.mn().product(), // batch_stride_D
options.problem_size.mn().product(), // batch_stride_Vector
options.problem_size.mn().product(), // batch_stride_Tensor
tensor_a.layout().stride(0), // stride_a
tensor_b.layout().stride(0), // stride_b
tensor_c1.layout().stride(0), // stride_c1
tensor_c2.layout().stride(0), // stride_c2
tensor_d.layout().stride(0), // stride_d
/*tensor_Vector.layout().stride(0)*/0, // stride_Vector
/*tensor_Tensor.layout().stride(0)*/0); // stride_Tensor
}
/// Populates a DeviceGemmStreamK::Arguments structure from the given commandline options
typename DeviceGemmStreamK::Arguments args_from_options(
const DeviceGemmStreamK &device_gemm,
const Options &options,
cutlass::HostTensor<ElementA, LayoutA> &tensor_a,
cutlass::HostTensor<ElementB, LayoutB> &tensor_b,
cutlass::HostTensor<ElementC, LayoutC> &tensor_c1,
cutlass::HostTensor<ElementC, LayoutC> &tensor_c2,
cutlass::HostTensor<ElementC, LayoutC> &tensor_d,
cutlass::HostTensor<ElementC, LayoutC> &tensor_Vector/*,
cutlass::HostTensor<ElementC, LayoutC> &tensor_Tensor*/
)
{
typename EVTD::Arguments callback_args{
{
{
{
{}, // Accum
{tensor_Vector.device_data(), ElementC(0), {_0{}, _1{}, int32_t(options.problem_size.n())}}, // Bias
{} // Compute0
}, // EVTCompute0
{tensor_c1.device_data(), ElementC(0), {options.problem_size.n(), _1{}, options.problem_size.mn().product()}}, // C1
{} // Compute1
}, // EVTCompute1
{tensor_c2.device_data(), ElementC(0), {options.problem_size.n(), _1{}, options.problem_size.mn().product()}}, // C2
{} // Compute2
}, // EVTCompute2
{tensor_d.device_data(), {options.problem_size.n(), _1{}, options.problem_size.mn().product()}}, // D
}; // EVTD
return typename DeviceGemmStreamK::Arguments(
cutlass::gemm::GemmUniversalMode::kGemm, // universal mode
options.problem_size, // problem_size
options.split_k_factor, // batch count / splitk slices
callback_args, // argument of EVT callbacks
tensor_a.device_data(), // ptr_A
tensor_b.device_data(), // ptr_B
nullptr, // ptr_C (unused)
nullptr, // ptr_D (unused)
options.problem_size.mk().product(), // batch_stride_A
options.problem_size.nk().product(), // batch_stride_B
0, // batch_stride_C (unused)
0, // batch_stride_D (unused)
tensor_a.layout().stride(0), // stride_a
tensor_b.layout().stride(0), // stride_b
0, // stride_c (unused)
0, // stride_d (unused)
options.avail_sms); // avail_sms
}
/// Execute a given example GEMM computation
template <typename DeviceGemmT>
Result run(std::string description, Options &options)
{
// Display test description
std::cout << std::endl << description << std::endl;
// Zero-initialize test output matrix D
cutlass::reference::host::TensorFill(options.tensor_d.host_view());
options.tensor_d.sync_device();
// Instantiate CUTLASS kernel depending on templates
DeviceGemmT device_gemm;
// Create a structure of gemm kernel arguments suitable for invoking an instance of DeviceGemmT
auto arguments = args_from_options(device_gemm, options,
options.tensor_a, options.tensor_b, options.tensor_c1, options.tensor_c2, options.tensor_d,
options.tensor_Vector/*, options.tensor_Tensor*/);
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = DeviceGemmT::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check the problem size is supported or not
CUTLASS_CHECK(device_gemm.can_implement(arguments));
// Initialize CUTLASS kernel with arguments and workspace pointer
CUTLASS_CHECK(device_gemm.initialize(arguments, workspace.get()));
// Correctness / Warmup iteration
CUTLASS_CHECK(device_gemm());
// Copy output data from CUTLASS and reference kernel to host for comparison
options.tensor_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
Result result;
result.passed = cutlass::reference::host::TensorEquals(
options.tensor_d.host_view(),
options.tensor_ref_d.host_view());
double err = cutlass::reference::host::TensorRelativeErrorMetric(
options.tensor_d.host_view(),
options.tensor_ref_d.host_view());
std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << " \t Relative error: " << err << std::endl;
// Run profiling loop
if (options.iterations > 0)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
CUTLASS_CHECK(device_gemm());
}
timer.stop();
// Compute average runtime and GFLOPs.
float elapsed_ms = timer.elapsed_millis();
result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations);
result.gflops = options.gflops(result.avg_runtime_ms / 1000.0);
std::cout << " Avg runtime: " << result.avg_runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
}
// TODO: uncomment when results match
//if (!result.passed) {
// exit(-1);
//}
return result;
}
/// Program entrypoint
int main(int argc, const char **argv)
{
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
// Returning zero so this test passes on older Toolkits. Its actions are a no-op.
return 0;
}
// Current device must have compute capability at least 80
cudaDeviceProp props;
int current_device_id;
CUDA_CHECK(cudaGetDevice(&current_device_id));
CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id));
if (!((props.major * 10 + props.minor) >= 80))
{
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
// Returning zero so this test passes on unsupported devices. Its actions are a no-op.
return 0;
}
// Parse commandline options
Options options("ampere_streamk_broadcast_gemm");
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
std::cout <<
options.iterations << " timing iterations of " <<
options.problem_size.m() << " x " <<
options.problem_size.n() << " x " <<
options.problem_size.k() << " matrix-matrix multiply" << std::endl;
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
//
// Initialize GEMM datasets
//
// Initialize tensors using CUTLASS helper functions
options.tensor_a.resize(options.problem_size.mk()); // <- Create matrix A with dimensions M x K
options.tensor_b.resize(options.problem_size.kn()); // <- Create matrix B with dimensions K x N
options.tensor_c1.resize(options.problem_size.mn()); // <- Create matrix C1 with dimensions M x N
options.tensor_c2.resize(options.problem_size.mn()); // <- Create matrix C2 with dimensions M x N
options.tensor_d.resize(options.problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from CUTLASS kernel
options.tensor_ref_d.resize(options.problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from reference kernel
options.tensor_Vector.resize({1, options.problem_size.n()}); // <- Create broadcast vector with dimensions 1 x N
// options.tensor_Tensor.resize(options.problem_size.mn()); // <- Create T matrix with dimensions M x N
int _init_bits = options.real ? -1 : 0;
// Fill matrix A on host with uniform-random data [-2, 2]
cutlass::reference::host::TensorFillRandomUniform(
options.tensor_a.host_view(),
1,
ElementA(2),
ElementA(-2), _init_bits);
// Fill matrix B on host with uniform-random data [-2, 2]
cutlass::reference::host::TensorFillRandomUniform(
options.tensor_b.host_view(),
1,
ElementB(2),
ElementB(-2), _init_bits);
// Fill matrix C1 on host with uniform-random data [-2, 2]
cutlass::reference::host::TensorFillRandomUniform(
options.tensor_c1.host_view(),
1,
ElementC(2),
ElementC(-2), _init_bits);
// Fill matrix C2 on host with uniform-random data [-2, 2]
cutlass::reference::host::TensorFillRandomUniform(
options.tensor_c2.host_view(),
1,
ElementC(2),
ElementC(-2), _init_bits);
cutlass::reference::host::TensorFillRandomUniform(
options.tensor_Vector.host_view(),
1,
ElementC(2),
ElementC(-2), _init_bits);
//
// Compute reference output
//
// Copy data from host to GPU
options.tensor_a.sync_device();
options.tensor_b.sync_device();
options.tensor_c1.sync_device();
options.tensor_c2.sync_device();
options.tensor_Vector.sync_device();
// options.tensor_Tensor.sync_device();
// Zero-initialize reference output matrix D
cutlass::reference::host::TensorFill(options.tensor_ref_d.host_view());
options.tensor_ref_d.sync_device();
// Create instantiation for device reference gemm kernel
DeviceGemmReference gemm_reference;
// Launch device reference gemm kernel
gemm_reference(
options.problem_size,
ElementAccumulator(options.alpha),
options.tensor_a.device_ref(),
options.tensor_b.device_ref(),
ElementAccumulator(options.beta),
options.tensor_c1.device_ref(),
options.tensor_ref_d.device_ref());
// Wait for kernels to finish
CUDA_CHECK(cudaDeviceSynchronize());
// Copy output data from reference kernel to host for comparison
options.tensor_ref_d.sync_host();
// Add broadcast vector (without multiplier)
// This is only possible because BinaryOp is addition, and UnaryOps are identity.
// This makes the addition of broadcast vector commutable.
/// identity(plus(identity(alpha * (a * b) + v), beta * c)) ==
/// alpha * a * b + v + beta * c ==
/// (alpha * a * b + beta * c) + v ==
/// GEMM(a, b, c) + v
// Vector broadcast on host
for (int i=0; i < options.problem_size.m(); ++i) {
for (int j=0; j < options.problem_size.n(); ++j) {
options.tensor_ref_d.host_view().ref().at({i, j}) += options.tensor_Vector.host_view().ref().at({0, j});
options.tensor_ref_d.host_view().ref().at({i, j}) += options.tensor_c2.host_view().ref().at({i, j});
}
}
// Sync back with device just in case
options.tensor_ref_d.sync_device();
//
// Evaluate CUTLASS kernels
//
// Test default operation
if (options.split_k_factor == 1)
{
// Compare basic data-parallel version versus StreamK version using default load-balancing heuristics
Result basic_dp = run<DeviceGemmBasic>("Basic data-parallel GEMM", options);
Result streamk_default = run<DeviceGemmStreamK>("StreamK GEMM with default load-balancing", options);
printf(" Speedup vs Basic-DP: %.3f\n", (basic_dp.avg_runtime_ms / streamk_default.avg_runtime_ms));
// Show that StreamK can emulate basic data-parallel GEMM when we set the number of SMs to load-balance across = 1
options.avail_sms = 1; // Set loadbalancing width to 1 SM (no load balancing)
Result streamk_dp = run<DeviceGemmStreamK>("StreamK emulating basic data-parallel GEMM", options);
options.avail_sms = -1; // Reset loadbalancing width to unspecified SMs (i.e., the number of device SMs)
printf(" Speedup vs Basic-DP: %.3f\n", (basic_dp.avg_runtime_ms / streamk_dp.avg_runtime_ms));
options.split_k_factor++; // Increment splitting factor for next evaluation
}
// Show that StreamK can emulate "Split-K" with a tile-splitting factor
Result basic_splitk = run<DeviceGemmBasic>(
std::string("Basic split-K GEMM with tile-splitting factor ") + std::to_string(options.split_k_factor),
options);
Result streamk_splitk = run<DeviceGemmStreamK>(
std::string("StreamK emulating Split-K GEMM with tile-splitting factor ") + std::to_string(options.split_k_factor),
options);
printf(" Speedup vs Basic-SplitK: %.3f\n", (basic_splitk.avg_runtime_ms / streamk_splitk.avg_runtime_ms));
return 0;
}
| examples/47_ampere_gemm_universal_streamk/ampere_gemm_universal_streamk_broadcast.cu/0 | {
"file_path": "examples/47_ampere_gemm_universal_streamk/ampere_gemm_universal_streamk_broadcast.cu",
"repo_id": "examples",
"token_count": 12478
} | 14 |
<jupyter_start><jupyter_text>Basic example of using the CUTLASS Python interfaceThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run GEMMs.[](https://colab.research.google.com/github/NVIDIA/cutlass/blob/main/examples/python/00_basic_gemm.ipynb) Prerequisites for running on ColabThis notebook requires an NVIDIA GPU. If `nvidia-smi` fails, go to Runtime -> Change runtime type -> Hardware accelerator and confirm a GPU is selected.<jupyter_code>!#nvidia-smi<jupyter_output><empty_output><jupyter_text>If running on Colab, you will need to install the CUTLASS Python interface. To do so, uncomment the following line and run the cell:<jupyter_code>!#pip install nvidia-cutlass<jupyter_output><empty_output><jupyter_text>General setupWe first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import numpy as np
import random
import cutlass
# This controls whether the C++ GEMM declaration will be printed at each step.
# Set to `False` to omit this information.
print_module = True
m = 128
n = m
k = m
dtype = np.float16
type_A = np.float16
type_B = np.float16
type_C = np.float16
type_D = np.float16
np.random.seed(1234)
random.seed(1234)
scope_min = -4
scope_max = 4
tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A))
tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B))
tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C))
alpha = np.float16(1.)
beta = np.float16(0.)
tensor_D = np.zeros(tensor_C.shape).astype(type_D)<jupyter_output><empty_output><jupyter_text>Declaring and running a GEMMTo get started, one only needs to provide the tensors declared above to the `cutlass.op.Gemm` call.This sets up a default GEMM operation for the given device on which you are running.Assuming that we are running on SM80, this defaults to using a GEMM that leverages FP16 Tensor Core operations.Calling `plan.run()` will generate the CUTLASS C++ kernel in question, compile it, and run it on the tensors we previously passed in. By setting `print_module` to `True`, the C++ code that is emitted is printed.<jupyter_code># We specify `element_accumulator` here so as to match the kernel run by NumPy below. However,
# specifying `element_accumulator` is not required if it is the same as `element`
plan = cutlass.Gemm(element=dtype, layout=cutlass.LayoutType.RowMajor, element_accumulator=np.float32)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output><empty_output><jupyter_text>There are many other ways to construct a plan from `cutlass.op.Gemm` (e.g., by specifying the types and layouts of each operand, by providing representative tensors as inputs). For more details on these, see the documentation in the `cutlass.op.Gemm` constructor. We then compare the output to running the GEMM using NumPy.<jupyter_code>tensor_D_numpy = (alpha * (tensor_A @ tensor_B)) + (beta * tensor_C)
np.testing.assert_array_equal(tensor_D, tensor_D_numpy)<jupyter_output><empty_output><jupyter_text>Note that one could use the same kernel just declared for tensors provided by other frameworks beyond NumPy, such as PyTorch or CuPy. Changing operation modesBy default, the CUTLASS Python interface will try to use Tensor Core operations whenever possible. If the configuration provided to `cutlass.op.Gemm` is not supported on Tensor Cores, the interface will fall back to using a SIMT kernel.The operation mode currently in use can be returned via the `plan.opclass` property. In this case, Tensor Core operations are in use.<jupyter_code>print(plan.opclass)<jupyter_output><empty_output><jupyter_text>Suppose that we don't want to use Tensor Cores for this GEMM. One can change to using CUTLASS's SIMT GEMMs by setting the plan's `opclass` field.As is shown in the printed output, the emitted kernel uses template parameters that fit CUTLASS's SIMT GEMMs.Also notice that, this time around, we provided tensor parameters to `plan.run()`. One is free to provide different parameters to `plan.run()` than were passed in at the initial call to `cutlass.op.Gemm`, provided that the passed-in tensors have the same data type and layout as those passed in on initialization.<jupyter_code>tensor_D_simt = np.zeros(tensor_C.shape).astype(type_D)
plan.opclass = cutlass.OpcodeClass.Simt
plan.run(tensor_A, tensor_B, tensor_C, tensor_D_simt, alpha, beta, print_module=print_module)<jupyter_output><empty_output><jupyter_text>If we compare the output of the Tensor Core and SIMT GEMMs we just ran we see that they are equal.<jupyter_code>np.testing.assert_array_equal(tensor_D, tensor_D_simt)<jupyter_output><empty_output><jupyter_text>Running cached kernelsYou may have noticed that the `plan.run()` calls for the previous two kernels took some time to execute. This is because the kernel being emitted had not yet been compiled.CUTLASS caches compiled binaries so that recompilation isn't necessary every time a kernel is run. For example, if we change modes back to using Tensor Cores and call `plan.run()` again (with a different set of tensor parameters), you'll find the call to return much faster.<jupyter_code>m = 2400
n = 3232
k = 4096
tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A))
tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B))
tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C))
tensor_D = np.zeros(tensor_C.shape).astype(type_D)
alpha = np.float16(1.)
beta = np.float16(2.)
plan.opclass = cutlass.OpcodeClass.TensorOp
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output><empty_output><jupyter_text>Running non-default GEMMsThe previous examples showed how it is simple to get started running a default GEMM kernel in CUTLASS. But, what do you do if you want a bit more control over the parameters to the GEMM?Under the hood, CUTLASS enumerates the different GEMM configuration parameters possible for this kernel from the CUTLASS profiler. The code below shows how one can access the tile descriptions for the kernels (e.g., cluster, threadblock, and warp shape).<jupyter_code>tiles = plan.tile_descriptions()
print('{} tile descriptions returned'.format(len(tiles)))
num_print = 10
print('First {} tile descriptions are:'.format(num_print))
for td in tiles[:num_print]:
print(td)<jupyter_output><empty_output><jupyter_text>Next, we'll pick one of these configurations at random and compile and run it.<jupyter_code>tiles = [td for td in tiles if td.threadblock_shape[0] >= 128]
idx = random.randint(0, len(tiles)-1)
td = tiles[idx]
print('Tile description {} is: {}'.format(idx, td))
plan.compile(td)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output><empty_output><jupyter_text>One can also change the swizzling function used by the kernel. For example, one can modify the kernel to use the stream K feature of CUTLASS via:<jupyter_code># Stream K is exposed through the threadblock swizzle method for pre-SM90 kernels,
# and via the tile_scheduler attribute of the TileDescription for post-SM90 kernels
if plan.cc < 90:
plan.swizzling_functor = cutlass.swizzle.ThreadblockSwizzleStreamK
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)
else:
# Stream-K is currently only supported for warp-specialized cooperative kernels
td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedCooperative
td.epilogue_schedule = cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative
td.tile_scheduler = cutlass.TileSchedulerType.StreamK
plan.compile(td)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output><empty_output><jupyter_text>Handling errorsThe CUTLASS Python interface attempts to catch runtime and compilation errors in Python so as to provide more understandable error messages.Here's an example in which we try to use too many stages for a given GEMM kernel. Normally, this would result in a runtime error due to the GPU having insufficient shared memory to launch the kernel with 8 stages. The CUTLASS Python interface is able to detect this issue before compiling the kernel, and reports it back to the user. Uncomment and run the code below to see this error.<jupyter_code># td = tiles[0]
# td.stages = 8
# plan.compile(td)<jupyter_output><empty_output><jupyter_text>Specializations for other data typesVarious CUTLASS kernels specialized for specific data types can also be run via the Python interface.For example, the code below shows how to declare and run a GEMM using the 3xTF32 feature (see corresponding C++ example [here](https://github.com/NVIDIA/cutlass/blob/main/examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu)).<jupyter_code>from cutlass.backend.utils.device import device_cc
# 3xTF32 requires SM80 or higher
if device_cc() >= 80:
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
plan.math_operation = cutlass.MathOperation.multiply_add_fast_f32
# Create input/output tensors in FP32
A, B = [np.ones((128, 128)).astype(np.float32) for _ in range(2)]
C, D = [np.zeros((128, 128)).astype(np.float32) for _ in range(2)]
# Run the GEMM
plan.run(A, B, C, D, print_module=print_module)<jupyter_output><empty_output><jupyter_text>Additionally, one can run CUTLASS's FP8 GEMMs if using a frontend library capable of allocating and initializing FP8 tensors (e.g., PyTorch)<jupyter_code>try:
import torch
except ImportError:
print("PyTorch is not available. Skipping FP8 example")
import sys; sys.exit(0)
if not hasattr(torch, "float8_e4m3fn"):
print("Version of PyTorch does not have the float8_e4m3fn data type. Skipping FP8 example")
import sys; sys.exit(0)
# FP8 is supported through the CUTLASS Python interface on SM90 and higher
if device_cc() >= 90:
plan = cutlass.op.Gemm(element=torch.float8_e4m3fn, element_C=torch.float32, element_accumulator=torch.float32,
layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.ColumnMajor,
layout_C=cutlass.LayoutType.ColumnMajor)
# Create input/output tensors in FP8
A, B = [torch.ones((128, 128)).to(torch.float8_e4m3fn).to("cuda") for _ in range(2)]
C, D = [torch.zeros((128, 128)).to(torch.float8_e4m3fn).to("cuda") for _ in range(2)]
# Run the GEMM
plan.run(A, B, C, D, print_module=print_module)<jupyter_output><empty_output> | examples/python/00_basic_gemm.ipynb/0 | {
"file_path": "examples/python/00_basic_gemm.ipynb",
"repo_id": "examples",
"token_count": 3596
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/** Common algorithms on (hierarchical) tensors */
#pragma once
#include <cute/config.hpp>
#include <cute/tensor.hpp>
namespace cute
{
//
// for_each
//
template <class Engine, class Layout, class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
for_each(Tensor<Engine,Layout> const& tensor, UnaryOp&& op)
{
CUTE_UNROLL
for (int i = 0; i < size(tensor); ++i) {
op(tensor(i));
}
}
template <class Engine, class Layout, class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
for_each(Tensor<Engine,Layout>& tensor, UnaryOp&& op)
{
CUTE_UNROLL
for (int i = 0; i < size(tensor); ++i) {
op(tensor(i));
}
}
// Accept mutable temporaries
template <class Engine, class Layout, class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
for_each(Tensor<Engine,Layout>&& tensor, UnaryOp&& op)
{
return for_each(tensor, op);
}
//
// transform
//
// Similar to std::transform, but modifies the tensor in place and does not return an iterator
template <class Engine, class Layout, class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<Engine,Layout>& tensor, UnaryOp&& op)
{
CUTE_UNROLL
for (int i = 0; i < size(tensor); ++i) {
tensor(i) = op(tensor(i));
}
}
// Accept mutable temporaries
template <class Engine, class Layout, class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<Engine,Layout>&& tensor, UnaryOp&& op)
{
return transform(tensor, op);
}
// Similar to std::transform: transforms one tensor and assigns the result to another
template <class EngineIn, class LayoutIn,
class EngineOut, class LayoutOut,
class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<EngineIn, LayoutIn > const& tensor_in,
Tensor<EngineOut,LayoutOut> & tensor_out,
UnaryOp&& op)
{
CUTE_UNROLL
for (int i = 0; i < size(tensor_in); ++i) {
tensor_out(i) = op(tensor_in(i));
}
}
// Accept mutable temporaries
template <class EngineIn, class LayoutIn,
class EngineOut, class LayoutOut,
class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<EngineIn, LayoutIn > const& tensor_in,
Tensor<EngineOut,LayoutOut> && tensor_out,
UnaryOp&& op)
{
return transform(tensor_in, tensor_out, op);
}
// Similar to std::transform with a binary operation
// Takes two tensors as input and one tensor as output.
// Applies the binary_op to tensor_in1 and tensor_in2 and
// assigns the result to tensor_out
template <class EngineIn1, class LayoutIn1,
class EngineIn2, class LayoutIn2,
class EngineOut, class LayoutOut,
class BinaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<EngineIn1,LayoutIn1> const& tensor_in1,
Tensor<EngineIn2,LayoutIn2> const& tensor_in2,
Tensor<EngineOut,LayoutOut> & tensor_out,
BinaryOp&& op)
{
CUTE_UNROLL
for (int i = 0; i < size(tensor_in1); ++i) {
tensor_out(i) = op(tensor_in1(i), tensor_in2(i));
}
}
// Accept mutable temporaries
template <class EngineIn1, class LayoutIn1,
class EngineIn2, class LayoutIn2,
class EngineOut, class LayoutOut,
class BinaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<EngineIn1,LayoutIn1> const& tensor_in1,
Tensor<EngineIn2,LayoutIn2> const& tensor_in2,
Tensor<EngineOut,LayoutOut> && tensor_out,
BinaryOp&& op)
{
return transform(tensor_in1, tensor_in2, tensor_out, op);
}
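//
// Illustrative sketch (not part of the original header): a typical combination of
// the algorithms above on register-backed fragments. The tensor types and element
// type are assumptions supplied by the caller.
//

template <class EngineIn,  class LayoutIn,
          class EngineOut, class LayoutOut>
CUTE_HOST_DEVICE
void
example_scale_and_accumulate(Tensor<EngineIn, LayoutIn > const& src,
                             Tensor<EngineOut,LayoutOut>      & acc)
{
  // acc(i) = acc(i) + 2 * src(i), element by element
  transform(src, acc, acc, [](auto const& s, auto const& a) { return a + s + s; });

  // Visit every element of the result (e.g. for debugging or a reduction)
  // for_each(acc, [](auto const& v) { /* inspect v */ });
}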
} // end namespace cute
| include/cute/algorithm/tensor_algorithms.hpp/0 | {
"file_path": "include/cute/algorithm/tensor_algorithms.hpp",
"repo_id": "include",
"token_count": 1853
} | 16 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/mma.hpp>
// Config
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && defined(__CUDA_ARCH_FEAT_SM90_ALL))
# define CUTE_ARCH_MMA_SM90A_ENABLED
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cute {
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA Descriptor and utilities
// GMMA enums and utilities
namespace GMMA
{
enum class LayoutType : uint8_t {
INTERLEAVE = 0,
B128 = 1,
B64 = 2,
B32 = 3,
};
CUTE_HOST_DEVICE char const* to_string(LayoutType const& t) {
switch (t) {
case LayoutType::INTERLEAVE: return "INTERLEAVE";
case LayoutType::B128: return "B128";
case LayoutType::B64: return "B64";
case LayoutType::B32: return "B32";
}
return nullptr;
}
#if !defined(__CUDACC_RTC__)
// Output operator for all enums in this namespace
CUTE_HOST std::ostream& operator<<(std::ostream& os, LayoutType const& t) {
char const* s = to_string(t);
if (s) {
std::operator<<(os, s); // Explicit call to avoid ambiguity
} else {
os.setstate(std::ios_base::failbit);
}
return os;
}
#endif // !defined(__CUDACC_RTC__)
} // end namespace GMMA
union GmmaDescriptor
{
CUTE_HOST_DEVICE constexpr
GmmaDescriptor() noexcept : desc_(0) {}
CUTE_HOST_DEVICE constexpr
GmmaDescriptor(uint64_t desc) noexcept : desc_(desc) {}
CUTE_HOST_DEVICE constexpr
GmmaDescriptor(GmmaDescriptor const& t) noexcept : desc_(t.desc_) {}
CUTE_HOST_DEVICE constexpr
GmmaDescriptor(GmmaDescriptor && t) noexcept : desc_(t.desc_) {}
CUTE_HOST_DEVICE constexpr
GmmaDescriptor& operator=(GmmaDescriptor const& t) noexcept {
desc_ = t.desc_;
return *this;
}
CUTE_HOST_DEVICE constexpr
GmmaDescriptor& operator=(GmmaDescriptor && t) noexcept {
desc_ = t.desc_;
return *this;
}
uint64_t desc_;
uint32_t reg32_[2];
uint16_t reg16_[4];
// Bitfield implementation avoids the need for shifts in assignment
struct {
// start_address, bit [0,14), 4LSB not included
uint16_t start_address_ : 14, : 2; // 14 bits [0,14), 2 bits unused
// leading dimension byte offset, bit [16,30), 4LSB not included
// For N: This is the stride from the first col to the second col of the 8x2 brick in INTERLEAVED
// Unused for all SWIZZLE_* layouts (and assumed to be 1)
// For T: This is the stride from the first 8 rows to the next 8 rows.
uint16_t leading_byte_offset_ : 14, : 2; // 14 bits [0,14), 2 bits unused
// stride dimension byte offset, bit [32,46), 4LSB not included
// For N: This is the stride from the first 8 rows to the next 8 rows.
// For T: This is the stride from the first 8 cols to the next 8 cols.
uint16_t stride_byte_offset_ : 14, : 2; // 14 bits [0,14), 2 bits unused
// base_offset, bit [49,52)
// Valid only for SWIZZLE_128B and SWIZZLE_64B
uint8_t : 1, base_offset_ : 3, : 4; // 1 bit unused, 3 bits [1,4), 4 bits unused
// layout type, bit [62,64)
// SWIZZLE_NONE = 0, SWIZZLE_32B = 3, SWIZZLE_64B = 2, SWIZZLE_128B = 1
uint8_t : 6, layout_type_ : 2; // 6 bits unused, 2 bits [6,8)
} bitfield;
// Decay to a uint64_t
CUTE_HOST_DEVICE constexpr
operator uint64_t() const noexcept { return desc_; }
// Printer
CUTE_HOST_DEVICE friend void print(GmmaDescriptor const& t)
{
#if !defined(__CUDACC_RTC__)
printf("GmmaDescriptor: 0x%016llx\n", static_cast<unsigned long long>(t.desc_));
printf(" start_addr : 0x%04x\n", t.bitfield.start_address_);
printf(" leading_off: 0x%04x (%d)\n", t.bitfield.leading_byte_offset_, t.bitfield.leading_byte_offset_);
printf(" stride_off : 0x%04x (%d)\n", t.bitfield.stride_byte_offset_, t.bitfield.stride_byte_offset_);
printf(" base_offset: 0x%01x\n", t.bitfield.base_offset_);
printf(" layout_type: 0x%01x (%s)\n", t.bitfield.layout_type_, to_string(static_cast<GMMA::LayoutType>(t.bitfield.layout_type_)));
#endif
}
};
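// Example (illustrative sketch; the field values are arbitrary and only demonstrate the bitfield view):
//   GmmaDescriptor desc;
//   desc.bitfield.layout_type_   = uint8_t(GMMA::LayoutType::B128);
//   desc.bitfield.start_address_ = 0x0040;  // 14-bit field; the 4 LSB of the address are not stored
//   print(desc);                            // dumps each field and the raw 64-bit value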
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cute
////////////////////////////////////////////////////////////////////////////////////////////////////
// end of file: include/cute/arch/mma_sm90_desc.hpp
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/container/tuple.hpp>
#include <cute/numeric/integral_constant.hpp>
#include <cute/algorithm/functional.hpp>
#include <cute/algorithm/tuple_algorithms.hpp>
#include <cute/util/type_traits.hpp>
namespace cute
{
template <class... T>
struct ArithmeticTuple : tuple<T...>
{
template <class... U>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple(ArithmeticTuple<U...> const& u)
: tuple<T...>(static_cast<tuple<U...> const&>(u)) {}
template <class... U>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple(tuple<U...> const& u)
: tuple<T...>(u) {}
template <class... U>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple(U const&... u)
: tuple<T...>(u...) {}
};
template <class... T>
struct is_tuple<ArithmeticTuple<T...>> : true_type {};
template <class... Ts>
struct is_flat<ArithmeticTuple<Ts...>> : is_flat<tuple<Ts...>> {};
template <class... T>
CUTE_HOST_DEVICE constexpr
auto
make_arithmetic_tuple(T const&... t) {
return ArithmeticTuple<T...>(t...);
}
template <class T>
CUTE_HOST_DEVICE constexpr
auto
as_arithmetic_tuple(T const& t) {
if constexpr (is_tuple<T>::value) {
return detail::tapply(t, [](auto const& x){ return as_arithmetic_tuple(x); },
[](auto const&... a){ return make_arithmetic_tuple(a...); },
tuple_seq<T>{});
} else {
return t;
}
}
//
// Numeric operators
//
// Addition
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator+(ArithmeticTuple<T...> const& t, ArithmeticTuple<U...> const& u) {
constexpr int R = cute::max(int(sizeof...(T)), int(sizeof...(U)));
return transform_apply(append<R>(t,Int<0>{}), append<R>(u,Int<0>{}), plus{}, [](auto const&... a){ return make_arithmetic_tuple(a...); });
}
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator+(ArithmeticTuple<T...> const& t, tuple<U...> const& u) {
return t + ArithmeticTuple<U...>(u);
}
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator+(tuple<T...> const& t, ArithmeticTuple<U...> const& u) {
return ArithmeticTuple<T...>(t) + u;
}
// Subtraction
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator-(ArithmeticTuple<T...> const& t, ArithmeticTuple<U...> const& u) {
constexpr int R = cute::max(int(sizeof...(T)), int(sizeof...(U)));
return transform_apply(append<R>(t,Int<0>{}), append<R>(u,Int<0>{}), minus{}, [](auto const&... a){ return make_arithmetic_tuple(a...); });
}
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator-(ArithmeticTuple<T...> const& t, tuple<U...> const& u) {
return t - ArithmeticTuple<U...>(u);
}
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator-(tuple<T...> const& t, ArithmeticTuple<U...> const& u) {
return ArithmeticTuple<T...>(t) - u;
}
// Negation
template <class... T>
CUTE_HOST_DEVICE constexpr
auto
operator-(ArithmeticTuple<T...> const& t) {
return transform_apply(t, negate{}, [](auto const&... a){ return make_arithmetic_tuple(a...); });
}
//
// Special cases
//
template <auto t, class... U>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple<U...> const&
operator+(C<t>, ArithmeticTuple<U...> const& u) {
static_assert(t == 0, "Arithmetic tuple op+ error!");
return u;
}
template <class... T, auto u>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple<T...> const&
operator+(ArithmeticTuple<T...> const& t, C<u>) {
static_assert(u == 0, "Arithmetic tuple op+ error!");
return t;
}
template <auto t, class... U>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple<U...> const&
operator-(C<t>, ArithmeticTuple<U...> const& u) {
static_assert(t == 0, "Arithmetic tuple op- error!");
return -u;
}
template <class... T, auto u>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple<T...> const&
operator-(ArithmeticTuple<T...> const& t, C<u>) {
static_assert(u == 0, "Arithmetic tuple op- error!");
return t;
}
//
// ArithmeticTupleIterator
//
template <class ArithTuple>
struct ArithmeticTupleIterator
{
using value_type = ArithTuple;
using element_type = ArithTuple;
using reference = ArithTuple;
ArithTuple coord_;
CUTE_HOST_DEVICE constexpr
ArithmeticTupleIterator(ArithTuple const& coord = {}) : coord_(coord) {}
CUTE_HOST_DEVICE constexpr
ArithTuple const& operator*() const { return coord_; }
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto operator[](Coord const& c) const { return *(*this + c); }
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto operator+(Coord const& c) const {
return ArithmeticTupleIterator<decltype(coord_ + c)>(coord_ + c);
}
};
template <class Tuple>
CUTE_HOST_DEVICE constexpr
auto
make_inttuple_iter(Tuple const& t) {
return ArithmeticTupleIterator(as_arithmetic_tuple(t));
}
template <class T0, class T1, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
make_inttuple_iter(T0 const& t0, T1 const& t1, Ts const&... ts) {
return make_inttuple_iter(cute::make_tuple(t0, t1, ts...));
}
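// Example (illustrative sketch):
//   auto iter = make_inttuple_iter(2, 3);       // iterator whose pointee is the coord (2,3)
//   auto c    = *(iter + make_coord(1, 1));     // the coord (3,4)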
//
// ArithmeticTuple "basis" elements
// A ScaledBasis<T,N> is an (at least) rank-(N+1) ArithmeticTuple:
// (_0,_0,...,T,_0,...)
// with value T in the Nth mode
template <class T, int N>
struct ScaledBasis : private tuple<T>
{
CUTE_HOST_DEVICE constexpr
ScaledBasis(T const& t = {}) : tuple<T>(t) {}
CUTE_HOST_DEVICE constexpr
decltype(auto) value() { return get<0>(static_cast<tuple<T> &>(*this)); }
CUTE_HOST_DEVICE constexpr
decltype(auto) value() const { return get<0>(static_cast<tuple<T> const&>(*this)); }
CUTE_HOST_DEVICE static constexpr
auto mode() { return Int<N>{}; }
};
template <class T>
struct is_scaled_basis : false_type {};
template <class T, int N>
struct is_scaled_basis<ScaledBasis<T,N>> : true_type {};
template <class T, int N>
struct is_integral<ScaledBasis<T,N>> : true_type {};
// Get the scalar T out of a ScaledBasis
template <class SB>
CUTE_HOST_DEVICE constexpr auto
basis_value(SB const& e)
{
if constexpr (is_scaled_basis<SB>::value) {
return basis_value(e.value());
} else {
return e;
}
CUTE_GCC_UNREACHABLE;
}
// Apply the N... pack to another Tuple
template <class SB, class Tuple>
CUTE_HOST_DEVICE constexpr auto
basis_get(SB const& e, Tuple const& t)
{
if constexpr (is_scaled_basis<SB>::value) {
return basis_get(e.value(), get<SB::mode()>(t));
} else {
return t;
}
CUTE_GCC_UNREACHABLE;
}
namespace detail {
template <class T, int... I>
CUTE_HOST_DEVICE constexpr
auto
to_atuple_i(T const& t, seq<I...>) {
return make_arithmetic_tuple((void(I),Int<0>{})..., t);
}
} // end namespace detail
// Turn a ScaledBasis<T,N> into a rank-(N+1) ArithmeticTuple
// with N prefix 0s: (_0,_0,...N...,_0,T)
template <class T, int N>
CUTE_HOST_DEVICE constexpr
auto
as_arithmetic_tuple(ScaledBasis<T,N> const& t) {
return detail::to_atuple_i(as_arithmetic_tuple(t.value()), make_seq<N>{});
}
namespace detail {
template <int... Ns>
struct Basis;
template <>
struct Basis<> {
using type = Int<1>;
};
template <int N, int... Ns>
struct Basis<N,Ns...> {
using type = ScaledBasis<typename Basis<Ns...>::type, N>;
};
} // end namespace detail
// Shortcut for writing ScaledBasis<ScaledBasis<ScaledBasis<Int<1>, N0>, N1>, ...>
// E<> := _1
// E<0> := (_1,_0,_0,...)
// E<1> := (_0,_1,_0,...)
// E<0,0> := ((_1,_0,_0,...),_0,_0,...)
// E<0,1> := ((_0,_1,_0,...),_0,_0,...)
// E<1,0> := (_0,(_1,_0,_0,...),_0,...)
// E<1,1> := (_0,(_0,_1,_0,...),_0,...)
template <int... N>
using E = typename detail::Basis<N...>::type;
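// Example (illustrative sketch) of basis arithmetic:
//   auto s = E<1>{} * Int<4>{};   // ScaledBasis<Int<4>,1>, prints as "_4@1"
//   auto t = E<0>{} * 2 + s;      // yields the ArithmeticTuple (2,_4)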
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_basis_like(Shape const& shape)
{
if constexpr (is_integral<Shape>::value) {
return Int<1>{};
} else {
// Generate bases for each rank of shape
return transform(tuple_seq<Shape>{}, shape, [](auto I, auto si) {
// Generate bases for each rank of si and add an i on front
using I_type = decltype(I);
return transform_leaf(make_basis_like(si), [](auto e) {
// MSVC has trouble capturing variables as constexpr,
// so that they can be used as template arguments.
// This is exactly what the code needs to do with i, unfortunately.
// The work-around is to define i inside the inner lambda,
// by using just the type from the enclosing scope.
constexpr int i = I_type::value;
return ScaledBasis<decltype(e), i>{};
});
});
}
CUTE_GCC_UNREACHABLE;
}
//
// Arithmetic
//
template <class T, int M, class U>
CUTE_HOST_DEVICE constexpr
auto
safe_div(ScaledBasis<T,M> const& b, U const& u)
{
auto t = safe_div(b.value(), u);
return ScaledBasis<decltype(t),M>{t};
}
template <class T, int M, class U>
CUTE_HOST_DEVICE constexpr
auto
shape_div(ScaledBasis<T,M> const& b, U const& u)
{
auto t = shape_div(b.value(), u);
return ScaledBasis<decltype(t),M>{t};
}
// Equality
template <class T, int N, class U, int M>
CUTE_HOST_DEVICE constexpr
auto
operator==(ScaledBasis<T,N> const& t, ScaledBasis<U,M> const& u) {
return bool_constant<M == N>{} && t.value() == u.value();
}
// Not equal to anything else
template <class T, int N, class U>
CUTE_HOST_DEVICE constexpr
false_type
operator==(ScaledBasis<T,N> const&, U const&) {
return {};
}
template <class T, class U, int M>
CUTE_HOST_DEVICE constexpr
false_type
operator==(T const&, ScaledBasis<U,M> const&) {
return {};
}
// Abs
template <class T, int N>
CUTE_HOST_DEVICE constexpr
auto
abs(ScaledBasis<T,N> const& e) {
return ScaledBasis<decltype(abs(e.value())),N>{abs(e.value())};
}
// Multiplication
template <class A, class T, int N>
CUTE_HOST_DEVICE constexpr
auto
operator*(A const& a, ScaledBasis<T,N> const& e) {
auto r = a * e.value();
return ScaledBasis<decltype(r),N>{r};
}
template <class T, int N, class B>
CUTE_HOST_DEVICE constexpr
auto
operator*(ScaledBasis<T,N> const& e, B const& b) {
auto r = e.value() * b;
return ScaledBasis<decltype(r),N>{r};
}
// Addition
template <class T, int N, class U, int M>
CUTE_HOST_DEVICE constexpr
auto
operator+(ScaledBasis<T,N> const& t, ScaledBasis<U,M> const& u) {
return as_arithmetic_tuple(t) + as_arithmetic_tuple(u);
}
template <class T, int N, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator+(ScaledBasis<T,N> const& t, ArithmeticTuple<U...> const& u) {
return as_arithmetic_tuple(t) + u;
}
template <class... T, class U, int M>
CUTE_HOST_DEVICE constexpr
auto
operator+(ArithmeticTuple<T...> const& t, ScaledBasis<U,M> const& u) {
return t + as_arithmetic_tuple(u);
}
template <auto t, class U, int M>
CUTE_HOST_DEVICE constexpr
auto
operator+(C<t>, ScaledBasis<U,M> const& u) {
static_assert(t == 0, "ScaledBasis op+ error!");
return u;
}
template <class T, int N, auto u>
CUTE_HOST_DEVICE constexpr
auto
operator+(ScaledBasis<T,N> const& t, C<u>) {
static_assert(u == 0, "ScaledBasis op+ error!");
return t;
}
//
// Display utilities
//
template <class ArithTuple>
CUTE_HOST_DEVICE void print(ArithmeticTupleIterator<ArithTuple> const& iter)
{
printf("ArithTuple"); print(iter.coord_);
}
template <class T, int N>
CUTE_HOST_DEVICE void print(ScaledBasis<T,N> const& e)
{
print(e.value()); printf("@%d", N);
}
#if !defined(__CUDACC_RTC__)
template <class ArithTuple>
CUTE_HOST std::ostream& operator<<(std::ostream& os, ArithmeticTupleIterator<ArithTuple> const& iter)
{
return os << "ArithTuple" << iter.coord_;
}
template <class T, int N>
CUTE_HOST std::ostream& operator<<(std::ostream& os, ScaledBasis<T,N> const& e)
{
return os << e.value() << "@" << N;
}
#endif
} // end namespace cute
namespace CUTE_STL_NAMESPACE
{
template <class... T>
struct tuple_size<cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, CUTE_STL_NAMESPACE::tuple<T...>>
{};
template <class... T>
struct tuple_size<const cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, const cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, const CUTE_STL_NAMESPACE::tuple<T...>>
{};
} // end namespace CUTE_STL_NAMESPACE
#ifdef CUTE_STL_NAMESPACE_IS_CUDA_STD
namespace std
{
#if defined(__CUDACC_RTC__)
template <class... _Tp>
struct tuple_size;
template <size_t _Ip, class... _Tp>
struct tuple_element;
#endif
template <class... T>
struct tuple_size<cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, CUTE_STL_NAMESPACE::tuple<T...>>
{};
template <class... T>
struct tuple_size<const cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, const cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, const CUTE_STL_NAMESPACE::tuple<T...>>
{};
} // end namespace std
#endif // CUTE_STL_NAMESPACE_IS_CUDA_STD
// end of file: include/cute/numeric/arithmetic_tuple.hpp
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/numeric/integral_constant.hpp>
#include <cute/numeric/integer_sequence.hpp>
#include <cute/container/tuple.hpp>
#include <cute/container/array_aligned.hpp>
#include <cute/container/array_subbyte.hpp>
#include <cute/pointer.hpp>
#include <cute/layout.hpp>
namespace cute
{
//
// Engine -- owning or non-owning data store
//
// concept Engine {
// using iterator = ;
// using value_type = ;
// using element_type = ;
// using reference = ;
// iterator begin();
// };
template <class T, int N>
struct ArrayEngine
{
using Storage = typename conditional<(sizeof_bits<T>::value % 8 == 0),
array_aligned<T,N>,
array_subbyte<T,N>>::type;
using iterator = typename Storage::iterator;
using reference = typename iterator_traits<iterator>::reference;
using element_type = typename iterator_traits<iterator>::element_type;
using value_type = typename iterator_traits<iterator>::value_type;
Storage storage_;
CUTE_HOST_DEVICE constexpr auto begin() const { return storage_.begin(); }
CUTE_HOST_DEVICE constexpr auto begin() { return storage_.begin(); }
};
template <class Iterator>
struct ViewEngine
{
using iterator = Iterator;
using reference = typename iterator_traits<iterator>::reference;
using element_type = typename iterator_traits<iterator>::element_type;
using value_type = typename iterator_traits<iterator>::value_type;
iterator storage_;
CUTE_HOST_DEVICE constexpr iterator const& begin() const { return storage_; }
CUTE_HOST_DEVICE constexpr iterator & begin() { return storage_; }
};
template <class Iterator>
struct ConstViewEngine
{
using iterator = Iterator;
using reference = typename iterator_traits<iterator>::reference;
using element_type = typename iterator_traits<iterator>::element_type;
using value_type = typename iterator_traits<iterator>::value_type;
iterator storage_;
CUTE_HOST_DEVICE constexpr iterator const& begin() const { return storage_; }
};
//
// Tensor
//
template <class Engine, class Layout>
struct Tensor
{
using iterator = typename Engine::iterator;
using value_type = typename Engine::value_type;
using element_type = typename Engine::element_type;
using reference = typename Engine::reference;
using engine_type = Engine;
using layout_type = Layout;
CUTE_HOST_DEVICE constexpr
Tensor() {}
template <class Ptr>
CUTE_HOST_DEVICE constexpr
Tensor(Ptr const& ptr, Layout const& layout)
: rep_(layout, ptr) {
}
//
// Accessors
//
static constexpr int rank = Layout::rank;
CUTE_HOST_DEVICE constexpr
decltype(auto)
tensor() const {
return *this;
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout() const {
return get<0>(rep_);
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
engine() const {
return get<1>(rep_);
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
engine() {
return get<1>(rep_);
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
data() const {
return engine().begin();
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
data() {
return engine().begin();
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape() const {
return layout().shape();
}
CUTE_HOST_DEVICE constexpr
auto
size() const {
return cute::size(shape());
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride() const {
return layout().stride();
}
//
// Indexing op() and op[]
//
// Index into this tensor like an array by computing the offset via layout()
template <class Coord>
CUTE_HOST_DEVICE constexpr
decltype(auto)
operator[](Coord const& coord) {
return data()[layout()(coord)];
}
template <class Coord>
CUTE_HOST_DEVICE constexpr
decltype(auto)
operator[](Coord const& coord) const {
return data()[layout()(coord)];
}
template <class Coord>
CUTE_HOST_DEVICE constexpr
decltype(auto)
operator()(Coord const& coord) {
if constexpr (has_underscore<Coord>::value) {
auto const& [sliced_layout,offset] = slice_and_offset(coord, layout());
return make_tensor(data() + offset, sliced_layout);
} else {
return data()[layout()(coord)];
}
CUTE_GCC_UNREACHABLE;
}
template <class Coord>
CUTE_HOST_DEVICE constexpr
decltype(auto)
operator()(Coord const& coord) const {
if constexpr (has_underscore<Coord>::value) {
auto const& [sliced_layout,offset] = slice_and_offset(coord, layout());
return make_tensor(data() + offset, sliced_layout);
} else {
return data()[layout()(coord)];
}
CUTE_GCC_UNREACHABLE;
}
// op() convenience function for multi-dimensional coordinates
template <class Coord0, class Coord1, class... Coords>
CUTE_HOST_DEVICE constexpr
decltype(auto)
operator()(Coord0 const& c0, Coord1 const& c1, Coords const&... cs) {
return operator()(make_coord(c0,c1,cs...));
}
template <class Coord0, class Coord1, class... Coords>
CUTE_HOST_DEVICE constexpr
decltype(auto)
operator()(Coord0 const& c0, Coord1 const& c1, Coords const&... cs) const {
return operator()(make_coord(c0,c1,cs...));
}
//
// Compose
//
template <class... Layouts>
CUTE_HOST_DEVICE constexpr
auto
compose(Layouts const&... layouts) {
return make_tensor(data(), layout().compose(layouts...));
}
template <class... Layouts>
CUTE_HOST_DEVICE constexpr
auto
compose(Layouts const&... layouts) const {
return make_tensor(data(), layout().compose(layouts...));
}
//
// Tile
//
template <class... Layouts>
CUTE_HOST_DEVICE constexpr
auto
tile(Layouts const&... layouts) {
return make_tensor(data(), layout().tile(layouts...));
}
template <class... Layouts>
CUTE_HOST_DEVICE constexpr
auto
tile(Layouts const&... layouts) const {
return make_tensor(data(), layout().tile(layouts...));
}
//
// Utility
//
template <class Int,
__CUTE_REQUIRES(is_integral<Int>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_1d_coord(Int const& linear_idx) const {
return layout().get_1d_coord(linear_idx);
}
template <class Int,
__CUTE_REQUIRES(is_integral<Int>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_hier_coord(Int const& linear_idx) const {
return layout().get_hier_coord(linear_idx);
}
template <class Int,
__CUTE_REQUIRES(is_integral<Int>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_flat_coord(Int const& linear_idx) const {
return layout().get_flat_coord(linear_idx);
}
cute::tuple<layout_type, engine_type> rep_;
};
template <class T>
struct is_tensor : false_type {};
template <class Engine, class Layout>
struct is_tensor<Tensor<Engine,Layout>> : true_type {};
template <class T>
constexpr bool is_tensor_v = is_tensor<T>::value;
// Customization point for creation of owning and non-owning Tensors
template <class T>
struct MakeTensor
{
template <class Layout,
__CUTE_REQUIRES(not has_dereference<T>::value &&
is_layout<Layout>::value)>
CUTE_HOST_DEVICE constexpr auto
operator()(Layout const& layout) const
{
static_assert(is_static<Layout>::value, "Dynamic owning tensors not supported");
using Engine = ArrayEngine<T, cosize_v<Layout>>;
return Tensor<Engine,Layout>();
}
template <class Layout,
__CUTE_REQUIRES(has_dereference<T>::value &&
is_layout<Layout>::value)>
CUTE_HOST_DEVICE constexpr auto
operator()(T const& iter, Layout const& layout)
{
using Engine = ViewEngine<T>;
return Tensor<Engine,Layout>(iter, layout);
}
template <class LayoutArg, class... LayoutArgs,
__CUTE_REQUIRES(not is_layout<LayoutArg>::value)>
CUTE_HOST_DEVICE constexpr auto
operator()(LayoutArg const& arg, LayoutArgs const&... args) const
{
return operator()(make_layout(arg, args...));
}
template <class LayoutArg, class... LayoutArgs,
__CUTE_REQUIRES(not is_layout<LayoutArg>::value)>
CUTE_HOST_DEVICE constexpr auto
operator()(T const& iter, LayoutArg const& arg, LayoutArgs const&... args)
{
return operator()(iter, make_layout(arg, args...));
}
};
//
// make_tensor
//
// Make an owning Tensor that will allocate a static array
// e.g. make_tensor<float>(Int<12>{})
template <class T, class... Args>
CUTE_HOST_DEVICE constexpr
auto
make_tensor(Args const&... args)
{
return MakeTensor<T>{}(args...);
}
// Make a non-owning Tensor that will use a pointer (view)
// e.g. make_tensor(vec.data(), 12)
template <class Iterator, class... Args>
CUTE_HOST_DEVICE constexpr
auto
make_tensor(Iterator const& iter, Args const&... args)
{
return MakeTensor<Iterator>{}(iter, args...);
}
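// Example (illustrative sketch; `gptr` is an assumed float* to global memory):
//   Tensor rmem = make_tensor<float>(Shape<_4,_8>{});                        // owning, statically shaped
//   Tensor gmem = make_tensor(make_gmem_ptr(gptr),
//                             make_shape(4, 8), make_stride(8, Int<1>{}));   // non-owning 4x8 row-major view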
//
// make_tensor_like
// Make a register tensor the same type and shape and (if possible) order as another tensor
//
template <class NewT, class Layout>
CUTE_HOST_DEVICE constexpr
auto
make_tensor_like(Layout const& layout)
{
return make_tensor<NewT>(make_layout_like(layout));
}
template <class NewT, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
auto
make_tensor_like(Tensor<Engine,Layout> const& tensor)
{
return make_tensor_like<NewT>(tensor.layout());
}
template <class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
auto
make_tensor_like(Tensor<Engine,Layout> const& tensor)
{
return make_tensor_like<typename Engine::value_type>(tensor.layout());
}
//
// make_fragment_like --
// Make a tensor the same shape and (if possible) order as another tensor, with special
// consideration of the 0th mode. The 0th mode is commonly used for MMA_Atoms or Copy_Atoms
// so this allocates the 0th mode with LayoutLeft regardless of the reference layout.
//
template <class NewT, class Layout>
CUTE_HOST_DEVICE constexpr
auto
make_fragment_like(Layout const& layout)
{
return make_tensor<NewT>(make_fragment_like(layout));
}
template <class NewT, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
auto
make_fragment_like(Tensor<Engine,Layout> const& tensor)
{
return make_fragment_like<NewT>(tensor.layout());
}
template <class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
auto
make_fragment_like(Tensor<Engine,Layout> const& tensor)
{
return make_fragment_like<typename Engine::value_type>(tensor.layout());
}
//
// make_counting_tensor
// Make a tensor from a layout by binding it to a counting iter with 0-offset of the same profile as the codomain.
//
template <class Layout, __CUTE_REQUIRES(is_layout<Layout>::value)>
CUTE_HOST_DEVICE constexpr
auto
make_counting_tensor(Layout const& layout)
{
return make_tensor(make_inttuple_iter(repeat_like(coshape(layout), Int<0>{})), layout);
}
//
// make_identity_tensor
// Make a tensor that maps coordinates within a shape to themselves.
//
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_identity_tensor(Shape const& shape)
{
return make_counting_tensor(make_identity_layout(shape));
}
//
// Utilities
//
// Return the subtensor of a mode
template <class Tensor,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
decltype(auto)
tensor(Tensor&& tensor)
{
return static_cast<Tensor&&>(tensor);
}
template <int I, int... Is, class Tensor,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
decltype(auto)
tensor(Tensor&& tensor)
{
return make_tensor(static_cast<Tensor&&>(tensor).data(), get<I,Is...>(tensor.layout()));
}
// Return the layout of a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout(Tensor<Engine,Layout> const& tensor)
{
return layout<Is...>(tensor.layout());
}
// Return the shape of a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape(Tensor<Engine,Layout> const& tensor)
{
return shape<Is...>(tensor.layout());
}
// Return the stride of a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride(Tensor<Engine,Layout> const& tensor)
{
return stride<Is...>(tensor.layout());
}
// Return the number of elements in a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
decltype(auto)
size(Tensor<Engine,Layout> const& tensor)
{
return size<Is...>(tensor.layout());
}
// Return the rank of a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
auto
rank(Tensor<Engine,Layout> const& tensor)
{
return rank<Is...>(tensor.layout());
}
// Return the depth of a mode
template <int... Is, class Engine, class Layout>
CUTE_HOST_DEVICE constexpr
auto
depth(Tensor<Engine, Layout> const& tensor)
{
return depth<Is...>(tensor.layout());
}
//
// Operations to manipulate Tensors like a Layout
//
template <class Tensor,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
flatten(Tensor&& tensor)
{
return make_tensor(static_cast<Tensor&&>(tensor).data(), flatten(tensor.layout()));
}
template <class Tensor,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
coalesce(Tensor&& tensor)
{
return make_tensor(static_cast<Tensor&&>(tensor).data(), coalesce(tensor.layout()));
}
template <class Tensor, class Profile,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
coalesce(Tensor&& tensor, Profile const& profile)
{
return make_tensor(static_cast<Tensor&&>(tensor).data(), coalesce(tensor.layout(), profile));
}
template <class Tensor,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
filter_zeros(Tensor&& tensor)
{
return make_tensor(static_cast<Tensor&&>(tensor).data(), filter_zeros(tensor.layout()));
}
template <class Tensor,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
filter(Tensor&& tensor)
{
return make_tensor(static_cast<Tensor&&>(tensor).data(), filter(tensor.layout()));
}
template <class Tensor, class Profile,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
filter(Tensor&& tensor, Profile const& profile)
{
return make_tensor(static_cast<Tensor&&>(tensor).data(), filter(tensor.layout(), profile));
}
// Return a tensor with the same shape as input but offset by a given coordinate
template <class Coord, class Tensor,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
domain_offset(Coord const& coord, Tensor&& tensor)
{
auto [layout, ptr_offset] = domain_offset(coord, tensor.layout());
return make_tensor(static_cast<Tensor&&>(tensor).data() + ptr_offset, layout);
}
// Group the modes [B,E) into a single mode
// e.g. group_modes<2,4>(make_tensor<int>(Layout<Shape<_1,_2,_3,_4,_5,_6>>{}))
// => make_tensor<int>(Layout<Shape<_1,_2,Shape<_3,_4>,_5,_6>>{})
template <int B, int E, class Tensor,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
group_modes(Tensor&& tensor)
{
return make_tensor(static_cast<Tensor&&>(tensor).data(),
group<B,E>(tensor.layout()));
}
// Return the subtensor of a range of modes
template <int B, int E, class Tensor,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
decltype(auto)
take(Tensor&& tensor)
{
return make_tensor(static_cast<Tensor&&>(tensor).data(), take<B,E>(tensor.layout()));
}
//
// Recast
//
// NOTE: This is very dangerous to do
// -- doesn't check dynamic integer divisibility
// -- doesn't check alignment
template <class NewType, class Tensor>
CUTE_HOST_DEVICE constexpr
auto
recast(Tensor&& tensor)
{
using OldType = typename remove_cvref_t<Tensor>::value_type;
auto old_layout = tensor.layout();
auto new_layout = recast_layout<OldType,NewType>(old_layout);
// If this is an upcast of a normal Layout with static negative strides, then offset as well
if constexpr (sizeof(OldType) < sizeof(NewType) && not is_composed_layout<decltype(old_layout)>::value) {
auto shape_diff = transform(flatten(old_layout.shape()), flatten(new_layout.shape()), minus{});
auto extent_diff = transform(shape_diff, flatten(old_layout.stride()), multiplies{});
auto offset = fold(extent_diff, Int<0>{}, [](auto const& i, auto const& a) { return i + cute::min(a,Int<0>{}); });
return make_tensor(recast_ptr<NewType>(static_cast<Tensor&&>(tensor).data() + offset), new_layout);
} else {
return make_tensor(recast_ptr<NewType>(static_cast<Tensor&&>(tensor).data() ), new_layout);
}
CUTE_GCC_UNREACHABLE;
}
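// Example (illustrative sketch; `pf` is an assumed, suitably aligned float*):
//   Tensor f  = make_tensor(make_gmem_ptr(pf), make_shape(_8{},_4{}));  // 8x4 floats, compact column-major
//   Tensor f2 = recast<float2>(f);                                      // viewed as 4x4 float2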
//
// max_common_vector
//
/* Return Int<N> such that N is the maximum number of contiguous elements
* that logically correspond in the tensors of @a a and @a b. That is,
* the number of elements that could reasonably be vectorized into a single load/store.
*
* @returns Int<N> with N >= 0
*
* A return value of Int<0> indicates that no such conclusion can be made and no
* vectorization should be attempted.
*
* Note that the return value does NOT include alignment concerns such as the pointer value and
* the divisibility of dynamic strides.
*/
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE constexpr
auto
max_common_vector(Tensor<SrcEngine,SrcLayout> const& a,
Tensor<DstEngine,DstLayout> const& b)
{
using SrcType = typename Tensor<SrcEngine,SrcLayout>::value_type;
using DstType = typename Tensor<DstEngine,DstLayout>::value_type;
using SrcRef = typename Tensor<SrcEngine,SrcLayout>::reference;
using DstRef = typename Tensor<DstEngine,DstLayout>::reference;
// Determine if vectorization candidates at all
if constexpr (// Should be the same value_types, else the copy is also performing a cast
sizeof_bits_v<SrcType> == sizeof_bits_v<DstType> &&
// The types should be trivially copyable so that vectorization is valid
is_trivially_copyable<SrcType>::value &&
is_trivially_copyable<DstType>::value &&
// Should be load/storing real data, rather than implicit iterators or such
is_reference<SrcRef>::value &&
is_reference<DstRef>::value)
{
return max_common_vector(a.layout(), b.layout());
} else {
return Int<0>{};
}
CUTE_GCC_UNREACHABLE;
}
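// Example (illustrative sketch; `pa`/`pb` are assumed pointers of the same value type):
//   Tensor a = make_tensor(make_gmem_ptr(pa), make_shape(_8{},_8{}));   // (_8,_8):(_1,_8)
//   Tensor b = make_tensor(make_smem_ptr(pb), make_shape(_8{},_8{}));   // (_8,_8):(_1,_8)
//   auto   v = max_common_vector(a, b);                                 // Int<64>: identical compact layouts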
/* Return a layout that points to the maximum number of contiguous elements
* that logically correspond in the tensors of @a a and @a b. That is,
* the elements that could reasonably be "vectorized" into a single load/store.
*
* @returns Layout R such that composition(a.layout(), R) and composition(b.layout(), R)
* are both identity Layouts.
*
* Note that the returned layout does NOT include alignment concerns such as the pointer value and
* the divisibility of dynamic strides.
*/
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE constexpr
auto
max_common_layout(Tensor<SrcEngine,SrcLayout> const& a,
Tensor<DstEngine,DstLayout> const& b)
{
using SrcType = typename Tensor<SrcEngine,SrcLayout>::value_type;
using DstType = typename Tensor<DstEngine,DstLayout>::value_type;
using SrcRef = typename Tensor<SrcEngine,SrcLayout>::reference;
using DstRef = typename Tensor<DstEngine,DstLayout>::reference;
// Determine if vectorization candidates at all
if constexpr (// Should be the same value_types, else the copy is also performing a cast
sizeof_bits_v<SrcType> == sizeof_bits_v<DstType> &&
// The types should be trivially copyable so that vectorization is valid
is_trivially_copyable<SrcType>::value &&
is_trivially_copyable<DstType>::value &&
// Should be load/storing real data, rather than implicit iterators or such
is_reference<SrcRef>::value &&
is_reference<DstRef>::value)
{
return max_common_layout(a.layout(), b.layout());
} else {
return Layout<_1,_0>{};
}
CUTE_GCC_UNREACHABLE;
}
//
// Key algebraic operations -- Divide and Product
//
// Apply a Tiler to the Tensor.
//
// Consider a Tensor with shape (A,B,x,y)
// And a Tiler that is:
//
// * A Layout with shape (BLK_A,BLK_B)
// ** Result Tensor shape ((BLK_A,BLK_B),Rest).
// ** That is, the Tensor and Tile are treated as 1D for the tiling.
// ** See logical_divide(Layout,Layout)
//
// * A Tile<Layout...> with shape <BLK_A,BLK_B>
// ** Result Tensor shape ((BLK_A,a),(BLK_B,b),x,y).
// ** Each mode of the Tile<Layout...> is applied to the corresponding mode of the Tensor.
// ** See logical_divide(Layout,Tuple)
//
// * A Shape (BLK_A,BLK_B)
// ** Result Tensor shape ((BLK_A,a),(BLK_B,b),x,y).
// ** Equivalent to applying Tile<BLK_A:_1,BLK_B:_1>.
// ** See logical_divide(Layout,Tuple) and logical_divide(Layout,Int)
//
// Note that the Tile<Layout...>/Shape Tilers must be weakly_congruent to the Tensor
template <class Tensor, class Tiler,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
logical_divide(Tensor && tensor,
Tiler const& tiler) // Layout or Tile<Layout...> or Shape
{
return make_tensor(static_cast<Tensor&&>(tensor).data(),
logical_divide(tensor.layout(), tiler));
}
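// Example (illustrative sketch; `ptr` is an assumed pointer to 128*64 elements):
//   Tensor A  = make_tensor(ptr, make_shape(128, 64));       // (128,64)
//   auto   ld = logical_divide(A, Shape<_32,_16>{});         // ((_32,4),(_16,4))
//   auto   zd = zipped_divide (A, Shape<_32,_16>{});         // ((_32,_16),(4,4))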
// zipped_divide is logical_divide with Tiler modes and Rest modes gathered together: (Tiler,Rest)
// When Tiler is Layout, this has no effect as logical_divide results in the same.
// When Tiler is Tile<Layout...> or Shape, this zips modes into standard form ((BLK_A,BLK_B),(a,b,x,y))
template <class Tensor, class Tiler,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
zipped_divide(Tensor && tensor,
Tiler const& tiler) // Layout or Tile<Layout...> or Shape
{
return make_tensor(static_cast<Tensor&&>(tensor).data(),
zipped_divide(tensor.layout(), tiler));
}
// tiled_divide is zipped_divide with the second output mode flattened ((BLK_A,BLK_B),a,b,x,y)
template <class Tensor, class Tiler,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
tiled_divide(Tensor && tensor,
Tiler const& tiler) // Layout or Tile<Layout...> or Shape
{
return make_tensor(static_cast<Tensor&&>(tensor).data(),
tiled_divide(tensor.layout(), tiler));
}
// flat_divide is zipped_divide with the both modes flattened (BLK_A,BLK_B,a,b,x,y)
template <class Tensor, class Tiler,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
flat_divide(Tensor && tensor,
Tiler const& tiler) // Layout or Tile<Layout...> or Shape
{
return make_tensor(static_cast<Tensor&&>(tensor).data(),
flat_divide(tensor.layout(), tiler));
}
// logical_product on a Tensor doesn't make sense since it often increases cosize
// though this might make sense for creating Tensors with broadcasted (stride-0) modes
//
// Tensor partitioning utilities
//
// Apply a Tiler to the Tensor, then slice out one of those tiles by slicing into the "Rest" modes.
// With an inner_partition, you get everything that's inside the Tiler. Everything that the Tiler is pointing to.
// Split the modes of tensor according to the Tiler
// zipped_divide returns something like ((BLK_A,BLK_B,...),(a,b,...,x,y))
// Then slice into the second mode (the "Rest" mode) with Coord
template <class Tensor, class Tiler, class Coord,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
inner_partition(Tensor && tensor,
Tiler const& tiler,
Coord const& coord)
{
auto tensor_tiled = zipped_divide(static_cast<Tensor&&>(tensor), tiler);
constexpr int R0 = decltype(rank<0>(tensor_tiled))::value;
// The coord slices into the second mode (the "rest" mode), flatten the first
if constexpr (is_tuple<Coord>::value) {
// Append trailing modes if coord is tuple
constexpr int R1 = decltype(rank<1>(tensor_tiled))::value;
return tensor_tiled(repeat<R0>(_), append<R1>(coord,_));
} else {
// Flat indexing if coord is not tuple
return tensor_tiled(repeat<R0>(_), coord);
}
}
// Apply a Tiler to the Tensor, then slice out the remainder by slicing into the "Tile" modes.
// With an outer_partition, you get everything that's outside the Tiler. The layout of the Tile in the Tensor.
// Split the modes of tensor according to the Tiler
// zipped_divide returns something like ((BLK_A,BLK_B,...),(a,b,...,x,y))
// Then slice into the first mode (the "Tile" mode) with Coord
template <class Tensor, class Tiler, class Coord,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
outer_partition(Tensor && tensor,
Tiler const& tiler,
Coord const& coord)
{
auto tensor_tiled = zipped_divide(static_cast<Tensor&&>(tensor), tiler);
constexpr int R1 = decltype(rank<1>(tensor_tiled))::value;
// The coord slices into the first mode (the "tile" mode), flatten the second
if constexpr (is_tuple<Coord>::value) {
// Append trailing modes if coord is tuple
constexpr int R0 = decltype(rank<0>(tensor_tiled))::value;
return tensor_tiled(append<R0>(coord,_), repeat<R1>(_));
} else {
// Flat indexing if coord is not tuple
return tensor_tiled(coord, repeat<R1>(_));
}
}
// Tile a tensor according to @a tiler and use @a coord to index into the remainder, keeping the tile.
// This is typical at the CTA level where tiles of data are extracted:
// Tensor data = ... // ( M, N)
// Tensor cta_data = local_tile(data, Shape<_32,_64>{}, make_coord(blockIdx.x,blockIdx.y)); // (_32,_64)
template <class Tensor, class Tiler, class Coord,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE constexpr
auto
local_tile(Tensor && tensor,
Tiler const& tiler, // tiler to apply
Coord const& coord) // coord to slice into "remainder"
{
return inner_partition(static_cast<Tensor&&>(tensor),
tiler,
coord);
}
// Same as above, but with a projection parameter to strip out unwanted tiling modes for convenience
// when using projections of the same tiler.
// This is typical at the CTA level where tiles of data are extracted as projections:
// Tensor dataA = ... // (M,K)
// Tensor dataB = ... // (N,K)
// Tensor dataC = ... // (M,N)
// auto cta_tiler = Shape<_32, _64, _4>{};
// auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _);
// Tensor ctaA = local_tile(dataA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (_32,_4,k)
// Tensor ctaB = local_tile(dataB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (_64,_4,k)
// Tensor ctaC = local_tile(dataC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (_32,_64)
template <class Tensor, class Tiler, class Coord, class Proj,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE
auto
local_tile(Tensor && tensor,
Tiler const& tiler, // tiler to apply
Coord const& coord, // coord to slice into "remainder"
Proj const& proj) // projection to apply to tiler and coord
{
return local_tile(static_cast<Tensor&&>(tensor),
dice(proj, tiler),
dice(proj, coord));
}
// Tile a tensor according to the flat shape of a layout that provides the coordinate of the target index.
// This is typical at the Thread level where data is partitioned across repeated patterns of threads:
// Tensor data = ... // (_16,_64)
// Tensor thr_data = local_partition(data, Layout<Shape<_2,_16>>{}, thr_idx); // ( _8, _4)
template <class Tensor, class LShape, class LStride, class Index,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE
auto
local_partition(Tensor && tensor,
Layout<LShape,LStride> const& tile, // coord -> index
Index const& index) // index to slice for
{
static_assert(is_integral<Index>::value);
return outer_partition(static_cast<Tensor&&>(tensor),
product_each(shape(tile)),
tile.get_flat_coord(index));
}
// Same as above, but with a projection parameter to strip out unwanted tiling modes for convenience
// when using projections of the same tiler.
// This is typical at the Thread level where data is partitioned across projected layouts of threads:
// Tensor dataA = ... // (M,K)
// Tensor dataB = ... // (N,K)
// Tensor dataC = ... // (M,N)
// auto thr_layout = Layout<Shape<_2,_16,_1>, Stride<_16,_1,_0>>{};
// Tensor thrA = local_partition(dataA, thr_layout, thr_idx, Step<_1, X,_1>{}); // (M/2,K/1)
// Tensor thrB = local_partition(dataB, thr_layout, thr_idx, Step< X,_1,_1>{}); // (N/16,K/1)
// Tensor thrC = local_partition(dataC, thr_layout, thr_idx, Step<_1,_1, X>{}); // (M/2,N/16)
template <class Tensor, class LShape, class LStride, class Index, class Projection,
__CUTE_REQUIRES(is_tensor<remove_cvref_t<Tensor>>::value)>
CUTE_HOST_DEVICE
auto
local_partition(Tensor && tensor,
Layout<LShape,LStride> const& tile, // coord -> index
Index const& index, // index to slice for
Projection const& proj)
{
return local_partition(static_cast<Tensor&&>(tensor),
dice(proj, tile),
index);
}
//
// Display utilities
//
template <class Engine, class Layout>
CUTE_HOST_DEVICE void print(Tensor<Engine,Layout> const& tensor)
{
print(tensor.data()); print(" o "); print(tensor.layout());
}
template <class Engine, class Layout>
CUTE_HOST_DEVICE void print_tensor(Tensor<Engine,Layout> const& tensor, bool print_type = true)
{
if (print_type) {
print(tensor); print(":\n");
}
if constexpr (Layout::rank == 1)
{
for (int m = 0; m < size(tensor); ++m) {
pretty_print(tensor(m));
printf("\n");
}
} else
if constexpr (Layout::rank == 2)
{
for (int m = 0; m < size<0>(tensor); ++m) {
for (int n = 0; n < size<1>(tensor); ++n) {
pretty_print(tensor(m,n));
}
printf("\n");
}
} else
if constexpr (Layout::rank == 3)
{
print_tensor(tensor(_,_,0), false);
for (int k = 1; k < size<2>(tensor); ++k) {
for (int i = 0; i < 5*size<1>(tensor); ++i) { print("-"); } print("\n");
print_tensor(tensor(_,_,k), false);
}
} else
if constexpr (Layout::rank == 4)
{
print_tensor(tensor(_,_,_,0), false);
for (int p = 1; p < size<3>(tensor); ++p) {
for (int i = 0; i < 5*size<1>(tensor); ++i) { print("="); } print("\n");
print_tensor(tensor(_,_,_,p), false);
}
}
}
#if !defined(__CUDACC_RTC__)
template <class Engine, class Layout>
CUTE_HOST std::ostream& print_tensor_os(std::ostream& os, Tensor<Engine,Layout> const& tensor)
{
int digits = 9;
if constexpr (Layout::rank == 1)
{
for (int m = 0; m < size(tensor); ++m) {
os << std::setw(digits) << tensor(m) << std::endl;
}
} else
if constexpr (Layout::rank == 2)
{
for (int m = 0; m < size<0>(tensor); ++m) {
for (int n = 0; n < size<1>(tensor); ++n) {
os << std::setw(digits) << tensor(m,n);
}
os << std::endl;
}
} else
if constexpr (Layout::rank == 3)
{
print_tensor_os(os, tensor(_,_,0));
for (int k = 1; k < size<2>(tensor); ++k) {
for (int i = 0; i < digits*size<1>(tensor); ++i) { os << "-"; } os << std::endl;
print_tensor_os(os, tensor(_,_,k));
}
} else
if constexpr (Layout::rank == 4)
{
print_tensor_os(os, tensor(_,_,_,0));
for (int p = 1; p < size<3>(tensor); ++p) {
for (int i = 0; i < digits*size<1>(tensor); ++i) { os << "="; } os << std::endl;
print_tensor_os(os, tensor(_,_,_,p));
}
}
return os;
}
template <class Engine, class Layout>
CUTE_HOST std::ostream& operator<<(std::ostream& os, Tensor<Engine,Layout> const& tensor)
{
os << tensor.layout() << std::endl;
return print_tensor_os(os, tensor);
}
#endif // !defined(__CUDACC_RTC__)
} // end namespace cute
//
// Extended Engines
//
#include <cute/pointer_swizzle.hpp>
#include <cute/pointer_flagged.hpp>
//
// Tensor Algorithms
//
#include <cute/algorithm/tensor_algorithms.hpp>
#include <cute/algorithm/fill.hpp>
#include <cute/algorithm/clear.hpp>
#include <cute/algorithm/copy.hpp>
#include <cute/algorithm/prefetch.hpp>
#include <cute/algorithm/axpby.hpp>
#include <cute/algorithm/gemm.hpp>
#include <cute/algorithm/cooperative_copy.hpp>
#include <cute/algorithm/cooperative_gemm.hpp>
// end of file: include/cute/tensor.hpp
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types
and is safe to use in a union.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_types.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically sized array for any data type
template <
typename T,
int N,
bool RegisterSized = sizeof_bits<T>::value >= 32
>
struct Array;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the size of an Array<> in bits
template <typename T, int N, bool RegisterSized>
struct sizeof_bits<Array<T, N, RegisterSized> > {
static constexpr int value = sizeof(Array<T, N, RegisterSized>) * 8;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if the argument is a power of 2
CUTLASS_HOST_DEVICE
constexpr bool ispow2(unsigned x) {
return x && (!(x & (x - 1)));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the largest power of two not greater than the argument.
CUTLASS_HOST_DEVICE
constexpr unsigned floor_pow_2(unsigned x) {
return (x == 0 || ispow2(x)) ? x : ((floor_pow_2(x >> 1)) << 1);
}
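// A few worked values (illustrative):
//   floor_pow_2(12) == 8, floor_pow_2(16) == 16, floor_pow_2(0) == 0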
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically sized array for any data type
template <
typename T,
int N
>
struct Array<T, N, true> {
/// Storage type
using Storage = T;
/// Element type
using Element = T;
/// Number of storage elements
//static std::size_t const kStorageElements = N;
static constexpr size_t kStorageElements = N;
/// Number of logical elements
static constexpr size_t kElements = N;
//
// C++ standard members
//
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef value_type &reference;
typedef value_type const & const_reference;
typedef value_type *pointer;
typedef value_type const * const_pointer;
//
// Iterators
//
/// Bidirectional iterator over elements
class iterator {
/// Pointer to object
T *ptr_;
public:
CUTLASS_HOST_DEVICE
iterator(): ptr_(nullptr) { }
CUTLASS_HOST_DEVICE
iterator(T *_ptr): ptr_(_ptr) { }
CUTLASS_HOST_DEVICE
iterator &operator++() {
++ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
iterator &operator--() {
--ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
iterator operator++(int) {
iterator ret(*this);
++ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
iterator operator--(int) {
iterator ret(*this);
--ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
T &operator*() const {
return *ptr_;
}
CUTLASS_HOST_DEVICE
bool operator==(iterator const &other) const {
return ptr_ == other.ptr_;
}
CUTLASS_HOST_DEVICE
bool operator!=(iterator const &other) const {
return ptr_ != other.ptr_;
}
};
/// Bidirectional constant iterator over elements
class const_iterator {
/// Pointer to object
const T *ptr_;
public:
CUTLASS_HOST_DEVICE
const_iterator(): ptr_(nullptr) { }
CUTLASS_HOST_DEVICE
const_iterator(T const *_ptr): ptr_(_ptr) { }
CUTLASS_HOST_DEVICE
const_iterator &operator++() {
++ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
const_iterator &operator--() {
--ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
const_iterator operator++(int) {
const_iterator ret(*this);
++ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
const_iterator operator--(int) {
const_iterator ret(*this);
--ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
T const &operator*() const {
return *ptr_;
}
CUTLASS_HOST_DEVICE
bool operator==(const_iterator const &other) const {
return ptr_ == other.ptr_;
}
CUTLASS_HOST_DEVICE
bool operator!=(const_iterator const &other) const {
return ptr_ != other.ptr_;
}
};
  /// Bidirectional reverse iterator over elements
class reverse_iterator {
/// Pointer to object
T *ptr_;
public:
CUTLASS_HOST_DEVICE
reverse_iterator(): ptr_(nullptr) { }
CUTLASS_HOST_DEVICE
reverse_iterator(T *_ptr): ptr_(_ptr) { }
CUTLASS_HOST_DEVICE
reverse_iterator &operator++() {
--ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
reverse_iterator &operator--() {
++ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
reverse_iterator operator++(int) {
      reverse_iterator ret(*this);
--ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
reverse_iterator operator--(int) {
      reverse_iterator ret(*this);
++ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
T &operator*() const {
return *(ptr_ - 1);
}
CUTLASS_HOST_DEVICE
bool operator==(reverse_iterator const &other) const {
return ptr_ == other.ptr_;
}
CUTLASS_HOST_DEVICE
bool operator!=(reverse_iterator const &other) const {
return ptr_ != other.ptr_;
}
};
  /// Bidirectional constant reverse iterator over elements
class const_reverse_iterator {
/// Pointer to object
T const *ptr_;
public:
CUTLASS_HOST_DEVICE
const_reverse_iterator(): ptr_(nullptr) { }
CUTLASS_HOST_DEVICE
const_reverse_iterator(T const *_ptr): ptr_(_ptr) { }
CUTLASS_HOST_DEVICE
const_reverse_iterator &operator++() {
--ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
const_reverse_iterator &operator--() {
++ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
const_reverse_iterator operator++(int) {
const_reverse_iterator ret(*this);
--ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
const_reverse_iterator operator--(int) {
const_reverse_iterator ret(*this);
++ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
T const &operator*() const {
return *(ptr_ - 1);
}
CUTLASS_HOST_DEVICE
    bool operator==(const_reverse_iterator const &other) const {
return ptr_ == other.ptr_;
}
CUTLASS_HOST_DEVICE
    bool operator!=(const_reverse_iterator const &other) const {
return ptr_ != other.ptr_;
}
};
/// Internal storage
Storage storage[kElements];
  /// Efficiently clears the array by setting every element to zero
CUTLASS_HOST_DEVICE
void clear() {
fill(T(0));
}
CUTLASS_HOST_DEVICE
reference at(size_type pos) {
return reinterpret_cast<reference>(storage[pos]);
}
CUTLASS_HOST_DEVICE
const_reference at(size_type pos) const {
return reinterpret_cast<const_reference>(storage[pos]);
}
CUTLASS_HOST_DEVICE
reference operator[](size_type pos) {
return reinterpret_cast<reference>(storage[pos]);
}
CUTLASS_HOST_DEVICE
const_reference operator[](size_type pos) const {
return reinterpret_cast<const_reference>(storage[pos]);
}
CUTLASS_HOST_DEVICE
reference front() {
return reinterpret_cast<reference>(storage[0]);
}
CUTLASS_HOST_DEVICE
const_reference front() const {
return reinterpret_cast<const_reference>(storage[0]);
}
CUTLASS_HOST_DEVICE
reference back() {
return reinterpret_cast<reference>(storage[kStorageElements - 1]);
}
CUTLASS_HOST_DEVICE
const_reference back() const {
return reinterpret_cast<const_reference>(storage[kStorageElements - 1]);
}
CUTLASS_HOST_DEVICE
pointer data() {
return reinterpret_cast<pointer>(storage);
}
CUTLASS_HOST_DEVICE
const_pointer data() const {
return reinterpret_cast<const_pointer>(storage);
}
CUTLASS_HOST_DEVICE
pointer raw_data() {
return reinterpret_cast<pointer>(storage);
}
CUTLASS_HOST_DEVICE
const_pointer raw_data() const {
return reinterpret_cast<const_pointer>(storage);
}
CUTLASS_HOST_DEVICE
constexpr bool empty() const {
return !kElements;
}
CUTLASS_HOST_DEVICE
constexpr size_type size() const {
return kElements;
}
CUTLASS_HOST_DEVICE
constexpr size_type max_size() const {
return kElements;
}
CUTLASS_HOST_DEVICE
void fill(T const &value) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(kElements); ++i) {
storage[i] = static_cast<Storage>(value);
}
}
CUTLASS_HOST_DEVICE
iterator begin() {
return iterator(storage);
}
CUTLASS_HOST_DEVICE
const_iterator begin() const {
return cbegin();
}
CUTLASS_HOST_DEVICE
const_iterator cbegin() const {
return const_iterator(storage);
}
CUTLASS_HOST_DEVICE
iterator end() {
return iterator(reinterpret_cast<pointer>(storage + kStorageElements));
}
CUTLASS_HOST_DEVICE
const_iterator end() const {
return cend();
}
CUTLASS_HOST_DEVICE
const_iterator cend() const {
return const_iterator(reinterpret_cast<const_pointer>(storage + kStorageElements));
}
CUTLASS_HOST_DEVICE
reverse_iterator rbegin() {
return reverse_iterator(reinterpret_cast<pointer>(storage + kStorageElements));
}
CUTLASS_HOST_DEVICE
const_reverse_iterator rbegin() const {
return crbegin();
}
CUTLASS_HOST_DEVICE
const_reverse_iterator crbegin() const {
return const_reverse_iterator(reinterpret_cast<const_pointer>(storage + kStorageElements));
}
CUTLASS_HOST_DEVICE
reverse_iterator rend() {
return reverse_iterator(reinterpret_cast<pointer>(storage));
}
CUTLASS_HOST_DEVICE
const_reverse_iterator rend() const {
return crend();
}
CUTLASS_HOST_DEVICE
const_reverse_iterator crend() const {
return const_reverse_iterator(reinterpret_cast<const_pointer>(storage));
}
};
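// Editorial usage sketch (illustrative only, not part of the original header): Array<>
// mirrors the std::array-style interface defined above. The function name is hypothetical.
CUTLASS_HOST_DEVICE
float example_array_basics_sketch() {
  Array<float, 4> a;
  a.fill(1.5f);                                      // assign every element
  a[0] = 2.5f;                                       // element access via operator[]
  float sum = 0.0f;
  for (auto it = a.begin(); it != a.end(); ++it) {   // bidirectional iteration
    sum += *it;
  }
  return sum;                                        // 2.5 + 3 * 1.5 = 7.0
}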
////////////////////////////////////////////////////////////////////////////////////////////////////
// Factories
////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 1> make_Array(Element x) {
return {x};
}
template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 2> make_Array(Element x, Element y) {
return {x,y};
}
template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 3> make_Array(Element x, Element y, Element z) {
return {x,y,z};
}
template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 4> make_Array(Element x, Element y, Element z, Element w) {
return {x,y,z,w};
}
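// Editorial sketch (illustrative only): make_Array() deduces the element type and the array
// width from its arguments. The function name below is hypothetical.
CUTLASS_HOST_DEVICE
Array<int, 4> example_make_array_sketch() {
  Array<int, 2> xy = make_Array(1, 2);          // Array<int, 2>
  return make_Array(xy[0], xy[1], 3, 4);        // Array<int, 4>
}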
/////////////////////////////////////////////////////////////////////////////////////////////////
// functional.h numeric specializations
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, int N>
struct absolute_value_op< Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs) const {
Array<T, N> result;
absolute_value_op<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i]);
}
return result;
}
};
template <typename T, int N>
struct plus<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
plus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
plus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()( T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
plus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct minus<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
minus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
minus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()( T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
minus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct multiplies<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
multiplies<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
multiplies<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()( T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
multiplies<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
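// Editorial sketch (illustrative only): the element-wise function objects defined above can be
// used directly, and each binary functor also broadcasts a scalar operand across the array.
CUTLASS_HOST_DEVICE
Array<float, 4> example_elementwise_functor_sketch(Array<float, 4> const &a,
                                                   Array<float, 4> const &b) {
  plus<Array<float, 4>>       add_op;
  multiplies<Array<float, 4>> mul_op;
  Array<float, 4> sum    = add_op(a, b);        // sum[i]    = a[i] + b[i]
  Array<float, 4> scaled = mul_op(sum, 0.5f);   // scaled[i] = sum[i] * 0.5f (scalar broadcast)
  return scaled;
}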
template <typename T, int N, bool PropogateNaN>
struct maximum_absolute_value_reduction<Array<T, N>, PropogateNaN> {
CUTLASS_HOST_DEVICE
T operator() (T const& scalar, Array<T, N> const& rhs) const {
T result = scalar;
maximum_absolute_value_reduction<T, PropogateNaN> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result = scalar_op(result, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct scale<Array<T, N>> {
T const scaling_factor_;
CUTLASS_HOST_DEVICE
scale(T scaling_factor) : scaling_factor_(scaling_factor) {
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const & rhs) const {
Array<T, N> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = rhs[i] * scaling_factor_;
}
return result;
}
};
template <typename T, int N>
struct divides<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
divides<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
divides<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()( T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
divides<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct reciprocal_approximate<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs) const {
Array<T, N> result;
reciprocal_approximate<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i]);
}
return result;
}
};
template <typename T, int N>
struct maximum<Array<T, N>, false> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
maximum<T, false> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
maximum<T, false> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
maximum<T, false> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct maximum<Array<T, N>, true> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
maximum<T, true> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
maximum<T, true> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
maximum<T, true> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct minimum<Array<T, N>, false> {
CUTLASS_HOST_DEVICE
static T scalar_op(T const &lhs, T const &rhs) {
return (rhs < lhs ? rhs : lhs);
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
minimum<T, false> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
minimum<T, false> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
minimum<T, false> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct minimum<Array<T, N>, true> {
CUTLASS_HOST_DEVICE
static T scalar_op(T const &lhs, T const &rhs) {
return (rhs < lhs ? rhs : lhs);
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
minimum<T, true> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
minimum<T, true> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
minimum<T, true> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct negate<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs) const {
Array<T, N> result;
negate<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i]);
}
return result;
}
};
/// Fused multiply-add
template <typename T, int N>
struct multiply_add<Array<T, N>, Array<T, N>, Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a, Array<T, N> const &b, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(a[i], b[i], c[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a, T const &scalar, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(a[i], scalar, c[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const &scalar, Array<T, N> const &b, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, b[i], c[i]);
}
return result;
}
};
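// Editorial sketch (illustrative only): multiply_add<> on Array<> operands expresses an
// element-wise fused multiply-add, d[i] = a[i] * b[i] + c[i].
CUTLASS_HOST_DEVICE
Array<float, 4> example_multiply_add_sketch(Array<float, 4> const &a,
                                            Array<float, 4> const &b,
                                            Array<float, 4> const &c) {
  multiply_add<Array<float, 4>, Array<float, 4>, Array<float, 4>> fma_op;
  return fma_op(a, b, c);
}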
/// Fused square-and-plus
template <typename T, int N>
struct square_and_plus<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
multiply_add<Array<T, N>, Array<T, N>, Array<T, N>> ma_op;
return ma_op(rhs, rhs, lhs);
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &rhs) const {
plus<Array<T, N>> plus_op;
multiplies<T> multiplies_op;
return plus_op(multiplies_op(rhs, rhs), lhs);
}
};
/// Inverse-square-root
template <typename T, int N>
struct inverse_square_root<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a) const {
Array<T, N> result;
inverse_square_root<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(a[i]);
}
return result;
}
};
template <int N>
struct inverse_square_root<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & a) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = h2rsqrt(a_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half d_residual = hrsqrt(a_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
inverse_square_root<half_t> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(a[i]);
}
#endif
return result;
}
};
/// Fused multiply-add-relu0
template <typename T, int N>
struct multiply_add_relu0<Array<T, N>, Array<T, N>, Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a, Array<T, N> const &b, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
maximum<T> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(scalar_op(a[i], b[i], c[i]), T(0));
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a, T const &scalar, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
maximum<T> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(scalar_op(a[i], scalar, c[i]), T(0));
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const &scalar, Array<T, N> const &b, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
maximum<T> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(scalar_op(scalar, b[i], c[i]), T(0));
}
return result;
}
};
template <typename T, int N>
struct conjugate<Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a) const {
conjugate<T> conj_op;
Array<T, N> ca;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
ca[i] = conj_op(a[i]);
}
return ca;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// functional.h numeric specializations targeting SIMD instructions in device code.
/////////////////////////////////////////////////////////////////////////////////////////////////
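// Editorial note: the specializations below all follow the same structure. In device code on a
// sufficiently new architecture (SM53+, or SM80+ where the intrinsic requires it), elements are
// processed two at a time through packed __half2 intrinsics; an odd trailing element (N % 2) is
// handled with the corresponding scalar __half intrinsic; host code and older architectures fall
// back to a plain element-wise loop.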
template <int N>
struct plus<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hadd2(lhs_ptr[i], rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hadd(a_residual_ptr[N - 1], b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] + rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hadd2(lhs_pair, rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hadd(reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs + rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hadd2(lhs_ptr[i], rhs_pair);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hadd(a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] + rhs;
}
#endif
return result;
}
};
template <int N>
struct minus<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hsub2(lhs_ptr[i], rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hsub(a_residual_ptr[N - 1], b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] - rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hsub2(lhs_pair, rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hsub(reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs - rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hsub2(lhs_ptr[i], rhs_pair);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hsub(a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] - rhs;
}
#endif
return result;
}
};
template <int N>
struct multiplies<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmul2(lhs_ptr[i], rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmul(a_residual_ptr[N - 1], b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] * rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmul2(lhs_pair, rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmul(
reinterpret_cast<__half const &>(lhs),
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs * rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmul2(lhs_ptr[i], rhs_pair);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hmul(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] * rhs;
}
#endif
return result;
}
};
template <int N>
struct divides<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __h2div(lhs_ptr[i], rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hdiv(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] / rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __h2div(lhs_pair, rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hdiv(
reinterpret_cast<__half const &>(lhs),
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs / rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __h2div(lhs_ptr[i], rhs_pair);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hdiv(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] / rhs;
}
#endif
return result;
}
};
template <int N>
struct negate<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *source_ptr = reinterpret_cast<__half2 const *>(&lhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hneg2(source_ptr[i]);
}
if constexpr (N % 2) {
half_t x = -lhs[N - 1];
__half lhs_val = reinterpret_cast<__half const &>(x);
result[N - 1] = reinterpret_cast<half_t const &>(lhs_val);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = -lhs[i];
}
#endif
return result;
}
};
/// Fused multiply-add
template <int N>
struct multiply_add<Array<half_t, N>, Array<half_t, N>, Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
Array<half_t, N> const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2(a_ptr[i], b_ptr[i], c_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1],
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b[i], c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
half_t const &a,
Array<half_t, N> const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 a_pair = __half2half2(reinterpret_cast<__half const &>(a));
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2(a_pair, b_ptr[i], c_ptr[i]);
}
if constexpr (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma(
reinterpret_cast<__half const &>(a),
b_residual_ptr[N - 1],
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a, b[i], c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
half_t const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 b_pair = __half2half2(reinterpret_cast<__half const &>(b));
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2(a_ptr[i], b_pair, c_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(b),
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b, c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
Array<half_t, N> const &b,
half_t const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 c_pair = __half2half2(reinterpret_cast<__half const &>(c));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2(a_ptr[i], b_ptr[i], c_pair);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half d_residual = __hfma(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1],
reinterpret_cast<__half const &>(c));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b[i], c);
}
#endif
return result;
}
};
/// Fused multiply-add-relu0
template <int N>
struct multiply_add_relu0<Array<half_t, N>, Array<half_t, N>, Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
Array<half_t, N> const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2_relu(a_ptr[i], b_ptr[i], c_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma_relu(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1],
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
maximum<half_t> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
      result[i] = mx(op(a[i], b[i], c[i]), half_t(0));
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
half_t const &a,
Array<half_t, N> const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 a_pair = __half2half2(reinterpret_cast<__half const &>(a));
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2_relu(a_pair, b_ptr[i], c_ptr[i]);
}
if constexpr (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma_relu(
reinterpret_cast<__half const &>(a),
b_residual_ptr[N - 1],
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
maximum<half_t> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(op(a, b[i], c[i]), half_t(0));
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
half_t const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 b_pair = __half2half2(reinterpret_cast<__half const &>(b));
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2_relu(a_ptr[i], b_pair, c_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma_relu(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(b),
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
maximum<half_t> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(op(a[i], b, c[i]), half_t(0));
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
Array<half_t, N> const &b,
half_t const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 c_pair = __half2half2(reinterpret_cast<__half const &>(c));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2_relu(a_ptr[i], b_ptr[i], c_pair);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half d_residual = __hfma_relu(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1],
reinterpret_cast<__half const &>(c));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
maximum<half_t> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(op(a[i], b[i], c), half_t(0));
}
#endif
return result;
}
};
template <int N>
struct minimum<Array<half_t, N>, false> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmin2(lhs_ptr[i], rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmin(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (rhs[i] < lhs[i] ? rhs[i] : lhs[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmin2(lhs_pair, rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmin(
reinterpret_cast<__half const &>(lhs),
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (rhs[i] < lhs ? rhs[i] : lhs);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmin2(lhs_ptr[i], rhs_pair);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hmin(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (rhs < lhs[i] ? rhs : lhs[i]);
}
#endif
return result;
}
};
template <int N>
struct maximum<Array<half_t, N>, false> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmax2(lhs_ptr[i], rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmax(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (lhs[i] < rhs[i] ? rhs[i] : lhs[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmax2(lhs_pair, rhs_ptr[i]);
}
if constexpr (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmax(
reinterpret_cast<__half const &>(lhs),
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (lhs < rhs[i] ? rhs[i] : lhs);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmax2(lhs_ptr[i], rhs_pair);
}
if constexpr (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hmax(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (lhs[i] < rhs ? rhs : lhs[i]);
}
#endif
return result;
}
};
/// Fused multiply-add
template <int N>
struct multiply_add<Array<bfloat16_t, N>, Array<bfloat16_t, N>, Array<bfloat16_t, N>> {
CUTLASS_HOST_DEVICE
Array<bfloat16_t, N> operator()(
Array<bfloat16_t, N> const &a,
Array<bfloat16_t, N> const &b,
Array<bfloat16_t, N> const &c) const {
Array<bfloat16_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
unsigned *result_ptr = reinterpret_cast<unsigned *>(&result);
unsigned const *a_ptr = reinterpret_cast<unsigned const *>(&a);
unsigned const *b_ptr = reinterpret_cast<unsigned const *>(&b);
unsigned const *c_ptr = reinterpret_cast<unsigned const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n"
: "=r"(result_ptr[i])
: "r"(a_ptr[i]), "r"(b_ptr[i]), "r"(c_ptr[i])
);
}
if constexpr (N % 2) {
uint16_t *result_ptr = reinterpret_cast<uint16_t *>(&result);
uint16_t const *a_residual_ptr = reinterpret_cast<uint16_t const *>(&a);
uint16_t const *b_residual_ptr = reinterpret_cast<uint16_t const *>(&b);
uint16_t const *c_residual_ptr = reinterpret_cast<uint16_t const *>(&c);
asm ("fma.rn.bf16 %0, %1, %2, %3;\n"
: "=h"(result_ptr[N - 1])
: "h"(a_residual_ptr[N - 1]), "h"(b_residual_ptr[N - 1]), "h"(c_residual_ptr[N - 1])
);
}
#else
multiply_add<bfloat16_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b[i], c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<bfloat16_t, N> operator()(
bfloat16_t const &a,
Array<bfloat16_t, N> const &b,
Array<bfloat16_t, N> const &c) const {
Array<bfloat16_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
unsigned *result_ptr = reinterpret_cast<unsigned *>(&result);
unsigned const *b_ptr = reinterpret_cast<unsigned const *>(&b);
unsigned const *c_ptr = reinterpret_cast<unsigned const *>(&c);
unsigned a_packed = static_cast<unsigned>(a.raw());
a_packed = (a_packed | (a_packed << 16));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n"
: "=r"(result_ptr[i])
: "r"(a_packed), "r"(b_ptr[i]), "r"(c_ptr[i])
);
}
if constexpr (N % 2) {
uint16_t *result_ptr = reinterpret_cast<uint16_t *>(&result);
uint16_t const *a_residual_ptr = reinterpret_cast<uint16_t const *>(&a);
uint16_t const *b_residual_ptr = reinterpret_cast<uint16_t const *>(&b);
uint16_t const *c_residual_ptr = reinterpret_cast<uint16_t const *>(&c);
asm ("fma.rn.bf16 %0, %1, %2, %3;\n"
: "=h"(result_ptr[N - 1])
: "h"(a_residual_ptr[0]), "h"(b_residual_ptr[N - 1]), "h"(c_residual_ptr[N - 1])
);
}
#else
multiply_add<bfloat16_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a, b[i], c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<bfloat16_t, N> operator()(
Array<bfloat16_t, N> const &a,
bfloat16_t const &b,
Array<bfloat16_t, N> const &c) const {
Array<bfloat16_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
unsigned *result_ptr = reinterpret_cast<unsigned *>(&result);
unsigned const *a_ptr = reinterpret_cast<unsigned const *>(&a);
unsigned const *c_ptr = reinterpret_cast<unsigned const *>(&c);
unsigned b_packed = static_cast<unsigned>(b.raw());
b_packed = (b_packed | (b_packed << 16));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n"
: "=r"(result_ptr[i])
: "r"(a_ptr[i]), "r"(b_packed), "r"(c_ptr[i])
);
}
if constexpr (N % 2) {
uint16_t *result_ptr = reinterpret_cast<uint16_t *>(&result);
uint16_t const *a_residual_ptr = reinterpret_cast<uint16_t const *>(&a);
uint16_t const *b_residual_ptr = reinterpret_cast<uint16_t const *>(&b);
uint16_t const *c_residual_ptr = reinterpret_cast<uint16_t const *>(&c);
asm ("fma.rn.bf16 %0, %1, %2, %3;\n"
: "=h"(result_ptr[N - 1])
: "h"(a_residual_ptr[N - 1]), "h"(b_residual_ptr[0]), "h"(c_residual_ptr[N - 1])
);
}
#else
multiply_add<bfloat16_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b, c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<bfloat16_t, N> operator()(
Array<bfloat16_t, N> const &a,
Array<bfloat16_t, N> const &b,
bfloat16_t const &c) const {
Array<bfloat16_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
unsigned *result_ptr = reinterpret_cast<unsigned *>(&result);
unsigned const *a_ptr = reinterpret_cast<unsigned const *>(&a);
unsigned const *b_ptr = reinterpret_cast<unsigned const *>(&b);
unsigned c_packed = static_cast<unsigned>(c.raw());
c_packed = (c_packed | (c_packed << 16));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n"
: "=r"(result_ptr[i])
: "r"(a_ptr[i]), "r"(b_ptr[i]), "r"(c_packed)
);
}
if constexpr (N % 2) {
uint16_t *result_ptr = reinterpret_cast<uint16_t *>(&result);
uint16_t const *a_residual_ptr = reinterpret_cast<uint16_t const *>(&a);
uint16_t const *b_residual_ptr = reinterpret_cast<uint16_t const *>(&b);
uint16_t const *c_residual_ptr = reinterpret_cast<uint16_t const *>(&c);
asm ("fma.rn.bf16 %0, %1, %2, %3;\n"
: "=h"(result_ptr[N - 1])
: "h"(a_residual_ptr[N - 1]), "h"(b_residual_ptr[N - 1]), "h"(c_residual_ptr[0])
);
}
#else
multiply_add<bfloat16_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b[i], c);
}
#endif
return result;
}
};
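// Editorial note: the bit-wise functors below operate on the packed Storage words exposed by
// raw_data() of Array<uint1b_t, N> (the sub-byte specialization defined in
// cutlass/array_subbyte.h, included near the end of this file), so each loop iteration
// processes an entire storage word rather than a single bit.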
/// bit_and
template <int N>
struct bit_and<Array<uint1b_t, N>> {
CUTLASS_HOST_DEVICE
Array<uint1b_t, N> operator()(Array<uint1b_t, N> const &a, Array<uint1b_t, N> const &b) const {
using ArrayType = Array<uint1b_t, N>;
using Storage = typename ArrayType::Storage;
ArrayType result;
Storage *result_data = result.raw_data();
Storage const *a_data = a.raw_data();
Storage const *b_data = b.raw_data();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ArrayType::kStorageElements; ++i) {
result_data[i] = (a_data[i] & b_data[i]);
}
return result;
}
};
/// bit_or
template <int N>
struct bit_or<Array<uint1b_t, N>> {
CUTLASS_HOST_DEVICE
Array<uint1b_t, N> operator()(Array<uint1b_t, N> const &a, Array<uint1b_t, N> const &b) const {
using ArrayType = Array<uint1b_t, N>;
using Storage = typename ArrayType::Storage;
ArrayType result;
Storage *result_data = result.raw_data();
Storage const *a_data = a.raw_data();
Storage const *b_data = b.raw_data();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ArrayType::kStorageElements; ++i) {
result_data[i] = (a_data[i] | b_data[i]);
}
return result;
}
};
/// bit_not
template <int N>
struct bit_not<Array<uint1b_t, N>> {
CUTLASS_HOST_DEVICE
Array<uint1b_t, N> operator()(Array<uint1b_t, N> const &a) const {
using ArrayType = Array<uint1b_t, N>;
using Storage = typename ArrayType::Storage;
ArrayType result;
Storage *result_data = result.raw_data();
Storage const *a_data = a.raw_data();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ArrayType::kStorageElements; ++i) {
result_data[i] = (~a_data[i]);
}
return result;
}
};
/// bit_xor
template <int N>
struct bit_xor<Array<uint1b_t, N>> {
CUTLASS_HOST_DEVICE
Array<uint1b_t, N> operator()(Array<uint1b_t, N> const &a, Array<uint1b_t, N> const &b) const {
using ArrayType = Array<uint1b_t, N>;
using Storage = typename ArrayType::Storage;
ArrayType result;
Storage *result_data = result.raw_data();
Storage const *a_data = a.raw_data();
Storage const *b_data = b.raw_data();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ArrayType::kStorageElements; ++i) {
result_data[i] = (a_data[i] ^ b_data[i]);
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Operator overloads
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator+(Array<T, N> const &lhs, Array<T, N> const &rhs) {
plus<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator+(T const &lhs, Array<T, N> const &rhs) {
plus<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator+(Array<T, N> const &lhs, T const &rhs) {
plus<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator-(Array<T, N> const &lhs, Array<T, N> const &rhs) {
minus<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator-(Array<T, N> const &lhs) {
negate<Array<T, N>> op;
return op(lhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(Array<T, N> const &lhs, Array<T, N> const &rhs) {
multiplies<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(T lhs, Array<T, N> const &rhs) {
multiplies<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(Array<T, N> const &lhs, T rhs) {
multiplies<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator/(Array<T, N> const &lhs, Array<T, N> const &rhs) {
divides<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(Array<T, N> const &a, Array<T, N> const &b, Array<T, N> const &c) {
multiply_add<Array<T, N>> op;
return op(a, b, c);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(T a, Array<T, N> const &b, Array<T, N> const &c) {
multiply_add<Array<T, N>> op;
return op(a, b, c);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(Array<T, N> const &a, T b, Array<T, N> const &c) {
multiply_add<Array<T, N>> op;
return op(a, b, c);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(Array<T, N> const &a, Array<T, N> const &b, T c) {
multiply_add<Array<T, N>> op;
return op(a, b, c);
}
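// Editorial sketch (illustrative only): the overloads above let Array<> participate in ordinary
// arithmetic expressions, and fma() dispatches to the element-wise multiply_add functor.
CUTLASS_HOST_DEVICE
Array<float, 4> example_operator_sketch(Array<float, 4> const &a,
                                        Array<float, 4> const &b,
                                        Array<float, 4> const &c) {
  Array<float, 4> d = a * b + c;     // separate element-wise multiply, then add
  Array<float, 4> e = fma(a, b, c);  // single fused element-wise multiply-add
  return d - e;                      // near zero; the fused form may round differently
}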
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/array_subbyte.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
// AlignedArray
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Aligned array type
template <
/// Element type
typename T,
/// Number of elements in the array
int N,
/// Alignment requirement in bytes
int Alignment = ( sizeof_bits<T>::value * N + 7 ) / 8
>
class alignas(Alignment) AlignedArray: public Array<T, N> {
public:
};
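// Editorial sketch (illustrative only): AlignedArray is typically used to give a fragment an
// explicit alignment so it can be moved with vectorized (e.g. 128-bit) memory accesses. The
// alias name below is hypothetical.
using ExampleAccessType = AlignedArray<half_t, 8, 16>;   // 8 x 16-bit elements, 16B aligned
static_assert(sizeof(ExampleAccessType) == 16, "8 half_t elements occupy 16 bytes");
static_assert(alignof(ExampleAccessType) == 16, "alignment follows the third template argument");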
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/array.h/0 | {
"file_path": "include/cutlass/array.h",
"repo_id": "include",
"token_count": 30495
} | 20 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cute/tensor_predicate.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cute/arch/copy_sm90.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/atom/copy_traits_sm90_im2col.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/convnd_problem_shape.hpp"
#include "cutlass/conv/dispatch_policy.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/util/packed_stride.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::conv::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
conv::Operator ConvOp,
int Stages,
int NumSpatialDims,
class ClusterShape,
class KernelSchedule,
int PipelineAsyncMmaStages,
class TileShape_,
class ElementA_,
class ElementB_,
class TiledMma_,
class TileTraitsA_,
class TileTraitsB_>
struct CollectiveConv<
MainloopSm90TmaGmmaWarpSpecializedImplicitGemm<
ConvOp, Stages, NumSpatialDims, ClusterShape, KernelSchedule, PipelineAsyncMmaStages>,
TileShape_,
ElementA_,
ElementB_,
TiledMma_,
TileTraitsA_,
TileTraitsB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm90TmaGmmaWarpSpecializedImplicitGemm<
ConvOp, Stages, NumSpatialDims, ClusterShape, KernelSchedule, PipelineAsyncMmaStages>;
using TileShape = TileShape_;
using ElementA = ElementA_;
using ElementB = ElementB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = typename TileTraitsA_::GmemTiledCopy;
using GmemTiledCopyB = typename TileTraitsB_::GmemTiledCopy;
using SmemLayoutA = typename TileTraitsA_::SmemLayout;
using SmemLayoutB = typename TileTraitsB_::SmemLayout;
using ArchTag = typename DispatchPolicy::ArchTag;
static constexpr int NumSpatialDimensions = DispatchPolicy::NumSpatialDimensions;
static constexpr int NumTensorDimensions = NumSpatialDimensions + 2;
// Deduce the kernel-facing stride tuple types based on the dispatch policy
// (which is a function of the number of spatial dimensions, the algorithm, etc.)
using StrideA = decltype(detail::sm90_dispatch_policy_to_stride_A<DispatchPolicy>());
using StrideB = decltype(detail::sm90_dispatch_policy_to_stride_B<DispatchPolicy>());
using MainloopPipeline = cutlass::PipelineTmaAsync<DispatchPolicy::Stages>;
using PipelineParams = typename MainloopPipeline::Params;
using PipelineState = typename cutlass::PipelineState<DispatchPolicy::Stages>;
// TODO: move pipeline mode tiling into the collective setup phase instead
static_assert(rank(SmemLayoutA{}) == 3, "SmemLayout must be rank 3 (M/N, K, PIPE)");
static_assert((size<0>(TileShape{}) == size<0>(SmemLayoutA{})), "SmemLayout must be compatible with the tile shape.");
static_assert((size<2>(TileShape{}) == size<1>(SmemLayoutA{})), "SmemLayout must be compatible with the tile shape.");
static_assert(rank(SmemLayoutB{}) == 3, "SmemLayout must be rank 3 (M/N, K, PIPE)");
static_assert((size<1>(TileShape{}) == size<0>(SmemLayoutB{})), "SmemLayout must be compatible with the tile shape.");
static_assert((size<2>(TileShape{}) == size<1>(SmemLayoutB{})), "SmemLayout must be compatible with the tile shape.");
static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more.");
static_assert(cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value &&
cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value,
"MMA atom must source both A and B operand from smem_desc for this mainloop.");
// The tma load mode of wgrad is tiled for tensor A and im2col for tensor B, while the tma load mode of the fprop and dgrad
// kernels is im2col for tensor A and tiled for tensor B.
static_assert((ConvOp == conv::Operator::kWgrad
&& (cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>))
|| (ConvOp != conv::Operator::kWgrad
&& (cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_IM2COL> || cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_IM2COL_MULTICAST>)),
"GmemTiledCopyA - invalid SM90 TMA copy atom specified.");
static_assert((ConvOp == conv::Operator::kWgrad
&& (cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_IM2COL> || cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_IM2COL_MULTICAST>))
|| (ConvOp != conv::Operator::kWgrad
&& (cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>)),
"GmemTiledCopyB - invalid SM90 TMA copy atom specified.");
// TMA converts f32 input to tf32 when copying from GMEM to SMEM
// For all other types, cast to size equivalent uint type to avoid any rounding by TMA.
static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>;
static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>;
using InternalElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>;
using InternalElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>;
struct SharedStorage
{
struct TensorStorage : cute::aligned_struct<128> {
cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>> smem_A;
cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>> smem_B;
} tensors;
using PipelineStorage = typename MainloopPipeline::SharedStorage;
PipelineStorage pipeline;
};
using TensorStorage = typename SharedStorage::TensorStorage;
using PipelineStorage = typename SharedStorage::PipelineStorage;
static constexpr int K_PIPE_MAX = DispatchPolicy::Stages;
static constexpr int K_PIPE_MMAS = DispatchPolicy::PipelineAsyncMmaStages;
static constexpr uint32_t TmaTransactionBytes =
(size<0>(SmemLayoutA{}) * size<1>(SmemLayoutA{}) * static_cast<uint32_t>(sizeof(InternalElementA)))+
(size<0>(SmemLayoutB{}) * size<1>(SmemLayoutB{}) * static_cast<uint32_t>(sizeof(InternalElementB)));
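  // Worked example (illustrative, assuming a 128x128x64 CTA tile with 16-bit A/B operands): each
  // pipeline stage transfers 128*64*2 bytes of A plus 128*64*2 bytes of B, so
  // TmaTransactionBytes = 16384 + 16384 = 32768 bytes.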
// Host side kernel arguments
struct Arguments {
using ProblemShape = ConvProblemShape<ConvOp, NumSpatialDimensions>;
ProblemShape problem_shape{};
ElementA const* ptr_A{nullptr};
ElementB const* ptr_B{nullptr};
};
private:
// Note that for the fprop and dgrad kernels, the tma load mode is im2col for tensor A and tiled for
// tensor B, while for the wgrad kernel the tma load mode is tiled for tensor A and im2col for tensor
// B since operands A and B are swapped.
// Get the tma_load_a instance.
template <class TensorA>
static constexpr auto
get_tma_load_a_instance(TensorA const& tensor_a, typename Arguments::ProblemShape const& problem_shape) {
if constexpr (ConvOp == conv::Operator::kFprop || ConvOp == conv::Operator::kDgrad) {
// compute the upper and lower corners based on the conv padding
auto lower_corner_whd = detail::compute_lower_corner_whd(problem_shape);
auto upper_corner_whd = detail::compute_upper_corner_whd(problem_shape);
auto lower_srt = detail::compute_lower_srt(problem_shape);
// The calculation of gbasis strides for the dgrad kernel needs to negate the dilation values.
cute::array<int32_t, NumSpatialDimensions> stride_srt{};
for (int i = 0; i < NumSpatialDimensions; ++i) {
stride_srt[i] = ConvOp == conv::Operator::kDgrad ?
-problem_shape.dilation[NumSpatialDimensions-1-i] :
problem_shape.dilation[NumSpatialDimensions-1-i];
}
return make_im2col_tma_copy(
GmemTiledCopyA{},
tensor_a,
SmemLayoutA{}(_,_,_0{}),
product_each(shape(SmemLayoutA{}(_,_,_0{}))),
size<1>(ClusterShape{}),
shape(lower_corner_whd),
shape(upper_corner_whd),
cute::reverse(shape(problem_shape.lower_padding)),
cute::reverse(shape(problem_shape.upper_padding)),
cute::reverse(shape(problem_shape.traversal_stride)),
shape(lower_srt),
shape(stride_srt));
}
// TMA tiled mode for tensor A in wgrad kernel.
else if constexpr (ConvOp == conv::Operator::kWgrad) {
return make_tma_copy(
GmemTiledCopyA{},
tensor_a,
SmemLayoutA{}(_,_,_0{}),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{}));
}
}
// Get the tma_load_b instance.
template <class TensorB>
static constexpr auto
get_tma_load_b_instance(TensorB const& tensor_b, typename Arguments::ProblemShape const& problem_shape) {
if constexpr (ConvOp == conv::Operator::kFprop || ConvOp == conv::Operator::kDgrad) {
return make_tma_copy(
GmemTiledCopyB{},
tensor_b,
SmemLayoutB{}(_,_,_0{}),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{}));
}
// TMA im2col mode for tensor B in wgrad kernel.
else if constexpr (ConvOp == conv::Operator::kWgrad) {
// compute the upper and lower corners based on the conv padding
auto lower_corner_whd = detail::compute_lower_corner_whd(problem_shape);
auto upper_corner_whd = detail::compute_upper_corner_whd(problem_shape);
auto lower_srt = detail::compute_lower_srt(problem_shape);
return make_im2col_tma_copy(
GmemTiledCopyB{},
tensor_b,
SmemLayoutB{}(_,_,_0{}),
product_each(shape(SmemLayoutB{}(_,_,_0{}))),
size<0>(ClusterShape{}),
shape(lower_corner_whd),
shape(upper_corner_whd),
cute::reverse(shape(problem_shape.lower_padding)),
cute::reverse(shape(problem_shape.upper_padding)),
cute::reverse(shape(problem_shape.traversal_stride)),
shape(lower_srt),
cute::reverse(shape(problem_shape.dilation)));
}
}
public:
// Device side kernel params
struct Params {
using _Submode = decltype(take<0,NumTensorDimensions-1>(typename Arguments::ProblemShape::TensorExtent{}));
using ProblemShape = cute::conditional_t<DispatchPolicy::ConvOp == conv::Operator::kWgrad,
Shape<int, _Submode, _Submode>,
Shape<_Submode, int, _Submode>>;
// Assumption: StrideA is congruent with Problem_MK
// Select TMA load type according to convolution operator.
using TensorShapeA = cute::conditional_t<ConvOp == conv::Operator::kWgrad,
decltype(repeat_like(StrideA{}, int32_t(0))),
decltype(make_shape(_Submode{}, int(0)))>;
using TensorShapeB = cute::conditional_t<ConvOp == conv::Operator::kWgrad,
decltype(make_shape(int(0), _Submode{})),
decltype(repeat_like(StrideB{}, int32_t(0)))>;
using TMA_A = decltype(get_tma_load_a_instance(
make_tensor(
make_gmem_ptr(static_cast<InternalElementA const*>(nullptr)),
make_layout(TensorShapeA{}, StrideA{})),
ConvProblemShape<ConvOp, NumSpatialDimensions>{}));
using TMA_B = decltype(get_tma_load_b_instance(
make_tensor(
make_gmem_ptr(static_cast<InternalElementB const*>(nullptr)),
make_layout(TensorShapeB{}, StrideB{})),
ConvProblemShape<ConvOp, NumSpatialDimensions>{}));
// Members
TMA_A tma_load_a;
TMA_B tma_load_b;
ProblemShape problem_shape;
};
//
// Methods
//
// Lowers the host-side, user-facing arguments to the kernel-facing launch params
static constexpr Params
to_underlying_arguments(Arguments const& args, void* workspace) {
(void) workspace;
// from the flat problem shape arrays of ConvProblemShape<ConvOp, N>, create a rank-3 MNK problem shape tuple
// tma desc creation depends on the original untransformed domain.
// A extents.
auto shape_A_orig = args.problem_shape.get_shape_A();
// B extents.
auto shape_B_orig = args.problem_shape.get_shape_B();
// Fill inferred cute strides from flat stride arrays
auto dA = make_cute_packed_stride(StrideA{}, args.problem_shape.stride_A, ConvOp);
auto dB = make_cute_packed_stride(StrideB{}, args.problem_shape.stride_B, ConvOp);
auto ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_A);
auto ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_B);
Tensor tensor_a = make_tensor(make_gmem_ptr(ptr_A), make_layout(shape_A_orig, dA));
Tensor tensor_b = make_tensor(make_gmem_ptr(ptr_B), make_layout(shape_B_orig, dB));
auto tma_load_a = get_tma_load_a_instance(tensor_a, args.problem_shape);
auto tma_load_b = get_tma_load_b_instance(tensor_b, args.problem_shape);
auto problem_shape_mnk = args.problem_shape.get_transformed_problem_shape_MNK();
return {
tma_load_a,
tma_load_b,
problem_shape_mnk
};
}
template<class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
ProblemShape const& problem_shape,
Arguments const& args) {
// Activation and filter channel mode extents must match
bool implementable = true;
// channel mode is major
implementable &= args.problem_shape.stride_A[NumTensorDimensions-1] == 1;
implementable &= args.problem_shape.stride_B[NumTensorDimensions-1] == 1;
constexpr int tma_alignment_bits = 128;
// A extents.
auto shape_A_orig = args.problem_shape.get_shape_A();
// B extents.
auto shape_B_orig = args.problem_shape.get_shape_B();
constexpr int min_tma_aligned_elements_A = tma_alignment_bits / cutlass::sizeof_bits<ElementA>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_A>(shape_A_orig, StrideA{});
constexpr int min_tma_aligned_elements_B = tma_alignment_bits / cutlass::sizeof_bits<ElementB>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_B>(shape_B_orig, StrideB{});
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
return false;
}
// Check valid padding values for TMA_LOAD_IM2COL
constexpr int padding_limit = (ProblemShape::RankS == 1) ? 65536 : (ProblemShape::RankS == 2 ? 256 : 16);
for (int i = 0; i < problem_shape.RankS; ++i) {
implementable = implementable && problem_shape.lower_padding[i] <= padding_limit && problem_shape.lower_padding[i] >= 0;
implementable = implementable && problem_shape.upper_padding[i] <= padding_limit && problem_shape.upper_padding[i] >= 0;
}
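    // For example, a 2-D convolution (RankS == 2) tolerates per-mode padding values in [0, 256],
    // while a 3-D convolution (RankS == 3) is limited to [0, 16], per the TMA im2col limits above.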
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Padding values don't meet requirements for TMA LOAD IM2COL.\n");
return false;
}
if (problem_shape.groups > 1) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: This kernel does not support conv groups > 1.\n");
return false;
}
return true;
}
/// Issue Tma Descriptor Prefetch -- ideally from a single thread for best performance
CUTLASS_DEVICE
static void prefetch_tma_descriptors(Params const& mainloop_params) {
cute::prefetch_tma_descriptor(mainloop_params.tma_load_a.get_tma_descriptor());
cute::prefetch_tma_descriptor(mainloop_params.tma_load_b.get_tma_descriptor());
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Producer Perspective
template <
class TensorA, class TMA_LOAD_A,
class TensorB, class TMA_LOAD_B,
class KTileIterator
>
CUTLASS_DEVICE void
load(MainloopPipeline pipeline,
PipelineState smem_pipe_producer_state,
TensorA const& gA, TMA_LOAD_A& tma_load_a,
TensorB const& gB, TMA_LOAD_B& tma_load_b,
KTileIterator k_tile_iter, int k_tile_count,
      int thread_idx,
TensorStorage& shared_tensors) {
int warp_idx = canonical_warp_idx_sync();
int warp_idx_in_warp_group = warp_idx % 4;
int lane_predicate = cute::elect_one_sync();
if (warp_idx_in_warp_group == 0 and lane_predicate) {
Tensor sA = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
//
// Prepare the TMA loads for A and B
//
dim3 cluster_local_block_id = cute::block_id_in_cluster();
auto block_tma_a = tma_load_a.get_slice(cluster_local_block_id.y);
auto block_tma_b = tma_load_b.get_slice(cluster_local_block_id.x);
// Applies the mapping from block_tma_a
Tensor tAgA = block_tma_a.partition_S(gA); // (TMA,TMA_M,TMA_K,k)
Tensor tAsA = block_tma_a.partition_D(sA); // (TMA,TMA_M,TMA_K,PIPE)
Tensor tBgB = block_tma_b.partition_S(gB); // (TMA,TMA_N,TMA_K,k)
Tensor tBsB = block_tma_b.partition_D(sB); // (TMA,TMA_N,TMA_K,PIPE)
uint16_t mcast_mask_a = 0;
uint16_t mcast_mask_b = 0;
// Issue TmaLoads
// Maps the tile -> block, value
if constexpr (cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_IM2COL_MULTICAST> ||
cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int n = 0; n < size<1>(block_layout); ++n) {
mcast_mask_a |= (uint16_t(1) << block_layout(cluster_local_block_id.x,n,Int<0>{}));
}
}
if constexpr (cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_IM2COL_MULTICAST> ||
cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int m = 0; m < size<0>(block_layout); ++m) {
mcast_mask_b |= (uint16_t(1) << block_layout(m,cluster_local_block_id.y,Int<0>{}));
}
}
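      // Illustrative note (assumes CuTe's default column-major cluster layout): with a 2x2 cluster,
      // block (m,n) maps to linear id m + 2*n. A CTA at cluster coordinates (x=0, y=0) that multicasts A
      // sets mcast_mask_a bits for block_layout(0,0) = 0 and block_layout(0,1) = 2, i.e. mask 0b0101,
      // selecting the CTAs that share the same m coordinate and therefore the same A tile.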
// Mainloop
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 0; --k_tile_count) {
// LOCK smem_pipe_producer_state for _writing_
pipeline.producer_acquire(smem_pipe_producer_state);
//
// Copy gmem to smem for *k_tile_iter
//
using BarrierType = typename MainloopPipeline::ProducerBarrierType;
BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_producer_state);
int write_stage = smem_pipe_producer_state.index();
copy(tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,write_stage));
copy(tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,write_stage));
++k_tile_iter;
// Advance smem_pipe_producer_state
++smem_pipe_producer_state;
}
}
}
/// Perform a Producer Epilogue to prevent early exit of blocks in a Cluster
CUTLASS_DEVICE void
load_tail(MainloopPipeline pipeline, PipelineState smem_pipe_producer_state) {
int warp_idx = canonical_warp_idx_sync();
int warp_idx_in_warp_group = warp_idx % 4;
int lane_predicate = cute::elect_one_sync();
// Issue the epilogue waits
if (warp_idx_in_warp_group == 0 and lane_predicate) {
/* This helps avoid early exit of blocks in Cluster
* Waits for all stages to either be released (all
* Consumer UNLOCKs), or if the stage was never used
* then would just be acquired since the phase was
* still inverted from make_producer_start_state
*/
pipeline.producer_tail(smem_pipe_producer_state);
}
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Consumer Perspective
template <class FrgTensorC>
CUTLASS_DEVICE void
mma(MainloopPipeline pipeline,
PipelineState smem_pipe_consumer_state,
FrgTensorC& accum,
int k_tile_count,
int thread_idx,
TensorStorage& shared_tensors,
Params const& mainloop_params) {
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
Tensor sA = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
//
// Define C accumulators and A/B partitioning
//
TiledMma tiled_mma;
auto thread_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCsA = thread_mma.partition_A(sA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCsB = thread_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE)
// Allocate "fragments/descriptors"
Tensor tCrA = thread_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE)
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(accum)); // M
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K
CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
//
// PIPELINED MAIN LOOP
//
static_assert((0 <= K_PIPE_MMAS) && (K_PIPE_MMAS < K_PIPE_MAX),
"ERROR : Incorrect number of MMAs in flight");
    // We release buffers to producer warps (dma load) with some mmas in flight
PipelineState smem_pipe_release = smem_pipe_consumer_state;
// Prologue GMMAs
int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count);
tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;
warpgroup_fence_operand(accum);
CUTLASS_PRAGMA_UNROLL
for (int k_tile_prologue = prologue_mma_count; k_tile_prologue > 0; --k_tile_prologue) {
// WAIT on smem_pipe_consumer_state until its data are available (phase bit flips from rdPhaseBit value)
pipeline.consumer_wait(smem_pipe_consumer_state);
int read_stage = smem_pipe_consumer_state.index();
warpgroup_arrive();
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) {
// (V,M,K) x (V,N,K) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block,read_stage), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
}
warpgroup_commit_batch();
++smem_pipe_consumer_state;
}
warpgroup_fence_operand(accum);
// Mainloop GMMAs
k_tile_count -= prologue_mma_count;
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 0; --k_tile_count) {
// WAIT on smem_pipe_consumer_state until its data are available (phase bit flips from rdPhaseBit value)
pipeline.consumer_wait(smem_pipe_consumer_state);
//
// Compute on k_tile
//
int read_stage = smem_pipe_consumer_state.index();
warpgroup_fence_operand(accum);
warpgroup_arrive();
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) {
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block,read_stage), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
}
warpgroup_commit_batch();
/// Wait on the GMMA barrier for K_PIPE_MMAS (or fewer) outstanding to ensure smem_pipe_producer_state is consumed
warpgroup_wait<K_PIPE_MMAS>();
warpgroup_fence_operand(accum);
// UNLOCK smem_pipe_release, done _computing_ on it
pipeline.consumer_release(smem_pipe_release);
// Advance smem_pipe_consumer_state and smem_pipe_release
++smem_pipe_consumer_state;
++smem_pipe_release;
}
warpgroup_fence_operand(accum);
}
/// Perform a Consumer Epilogue to release all buffers
CUTLASS_DEVICE void
mma_tail(MainloopPipeline pipeline, PipelineState smem_pipe_release, int k_tile_count) {
// Prologue GMMAs
int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count);
k_tile_count -= prologue_mma_count;
smem_pipe_release.advance(k_tile_count);
// Wait on all GMMAs to complete
warpgroup_wait<0>();
for (int count = 0; count < prologue_mma_count; ++count) {
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::conv::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/collective/sm90_implicit_gemm_gmma_ss_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/conv/collective/sm90_implicit_gemm_gmma_ss_warpspecialized.hpp",
"repo_id": "include",
"token_count": 11460
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dFpropFilterTileAccessIteratorFewChannels {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kFewChannels;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kPositionsPerTile = Shape::kRow;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static bool const kUseFastDivmodPrologue = true;
static bool const kUseFastDivmodMainloop = true;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dFewChannelsParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int rsc_index_;
int offset_k_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorFewChannels(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
rsc_index_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
rsc_index_ = (threadblock_offset.row() + thread_coord.contiguous());
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
    pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
rsc_index_ += kPositionsPerTile * problem_size_.split_k_slices;
}
/// Returns the coordinate in the filter tensor W that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int rsc_index = rsc_index_ + iteration_vector_ * AccessType::kElements;
int c = 0;
int s = 0;
int r = 0;
if (kUseFastDivmodMainloop) {
int rs_index = params_.divmod_C.divmod(c, rsc_index);
r = params_.divmod_S.divmod(s, rs_index);
}
else {
c = (rsc_index % problem_size_.C);
int rs_index = (rsc_index / problem_size_.C);
s = (rs_index % problem_size_.S);
r = (rs_index / problem_size_.S);
}
int k = offset_k_[iteration_strided_];
return TensorCoord(k, r, s, c);
}
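  // Worked example (illustrative): with problem_size_.C == 4 and problem_size_.S == 3, an
  // rsc_index of 14 decomposes as c = 14 % 4 = 2, rs_index = 3, s = 3 % 3 = 0, r = 3 / 3 = 1,
  // yielding the filter coordinate (k, r, s, c) = (offset_k_[iteration_strided_], 1, 0, 2).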
  /// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
bool in_bounds =
coord.n() < problem_size_.K &&
coord.h() >= 0 &&
coord.h() < problem_size_.R &&
coord.c() < problem_size_.C;
return in_bounds;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
int32_t offset =
coord.n() * params_.stride_n +
coord.h() * params_.stride_h +
coord.w() * params_.stride_w +
coord.c();
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorFewChannels &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorCxRSKx<32>>::value) {
if (problem_size.K % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorCxRSKx<64>>::value) {
if (problem_size.K % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h",
"repo_id": "include",
"token_count": 3035
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
bool IsDeconv_ = false
>
class Conv3dFpropFilterTileAccessIteratorOptimized{
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static bool const IsDeconv = IsDeconv_;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
struct Params : Conv3dFpropFilterIteratorOptimizedParams<Layout> {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv3dFpropFilterIteratorOptimizedParams<Layout> const &base):
Conv3dFpropFilterIteratorOptimizedParams<Layout>(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
):
Conv3dFpropFilterIteratorOptimizedParams<Layout>(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) {
}
};
private:
Conv3dFpropFilterIteratorOptimizedParams<Layout> const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
uint32_t predicates_;
int filter_trs_;
int filter_c_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorOptimized(
Conv3dFpropFilterIteratorOptimizedParams<Layout> const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_trs_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.row() + thread_coord.contiguous();
Index column = threadblock_offset.column() + thread_coord.strided();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
uint32_t pred = ((column + s * ThreadMap::Delta::kStrided < (IsDeconv ? problem_size_.C : problem_size_.K)) ? 1u : 0);
predicates_ |= (pred << s);
}
if (filter_c_ >= (IsDeconv ? problem_size_.K : problem_size_.C)) {
predicates_ = 0u;
}
pointer_ += (
params_.layout({filter_c_, column})
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
LongIndex next = params_.inc_next_trs;
// moves to the next tile
++filter_trs_;
if (filter_trs_ == params_.TRS) {
filter_trs_ = 0;
next = params_.inc_next_c;
filter_c_ += params_.filter_c_delta;
}
if (filter_c_ >= (IsDeconv ? problem_size_.K : problem_size_.C)) {
predicates_ = 0;
}
pointer_ += next;
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
return (predicates_ & (1u << iteration_strided_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_k;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
auto input_channels = (IsDeconv ? problem_size.K : problem_size.C);
// check alignment constraint on iterator's contiguous dimension
if (input_channels % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 3008
} | 23 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Visitor tree compute operations for the sm90 TMA warp-specialized (ws) epilogue
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cute/tensor.hpp"
#include "cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp"
#include "cutlass/epilogue/fusion/sm90_visitor_load_tma_warpspecialized.hpp"
#include "cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::fusion {
using namespace cute;
using namespace detail;
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// N-nary Elementwise Compute Operation
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// The template argument provided for ComputeFn must be able to accept
// exactly one template parameter. In Standard C++, it's OK for
// ComputeFn to have other template parameters, as long as those have
// defaults. For example, the following struct Foo would work.
//
// template<class A, class B = A>
// struct Foo {
// CUTLASS_HOST_DEVICE auto operator() (A a, B b);
// };
//
// However, some compilers, such as Clang, require that the argument
// take _exactly_ one template parameter. This is nonstandard C++
// behavior. One work-around for this case is to create a subclass
// with exactly one template parameter, and then use that subclass as
// the template argument.
//
// template<class A>
// template<class A>
// struct FooHomogeneous : public Foo<A, A> {};
//
template<
template <class> class ComputeFn,
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle,
class = void
>
struct Sm90Compute {
private:
using EmptyArguments = typename Sm90VisitorImpl<>::Arguments;
template <class Fn, class = void>
struct ComputeArguments {
using type = EmptyArguments;
};
// partial specialization for compute fns that define an Arguments member, e.g. activation hyperparameters
template <class Fn>
struct ComputeArguments<Fn, platform::void_t<typename Fn::Arguments>> {
using type = typename Fn::Arguments;
};
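  // For instance (hypothetical illustration, not an actual CUTLASS functor), a compute fn that
  // exposes a hyperparameter is detected by the partial specialization above through its nested
  // Arguments type:
  //
  //   template <class T>
  //   struct ScaleBy {
  //     struct Arguments { T alpha; };
  //     CUTLASS_HOST_DEVICE T operator()(T const& x, Arguments const& args) const { return args.alpha * x; }
  //   };
  //
  // A compute fn without a nested Arguments type falls back to EmptyArguments instead.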
public:
struct SharedStorage { };
using Arguments = typename ComputeArguments<ComputeFn<ElementCompute>>::type;
using Params = Arguments;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const&, Arguments const& args, void*) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const&, Arguments const&) {
return 0;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return cutlass::Status::kSuccess;
}
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return false;
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
return false;
}
CUTLASS_HOST_DEVICE
Sm90Compute() { }
CUTLASS_HOST_DEVICE
Sm90Compute(Params const& params, SharedStorage const& shared_storage)
: params(params) {}
Params const params;
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return EmptyProducerLoadCallbacks{};
}
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
CUTLASS_DEVICE
ConsumerStoreCallbacks(Params const& params)
: params(params) {}
Params const& params;
template <typename ElementAccumulator, typename... ElementInputs, int FragmentSize>
CUTLASS_DEVICE Array<ElementOutput, FragmentSize>
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInputs, FragmentSize> const&... frg_inputs) {
return transform_apply(cute::make_tuple(frg_inputs...),
[&] (auto&& frg_input) {
using ElementInput = typename cute::remove_cvref_t<decltype(frg_input)>::Element;
using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>;
ConvertInput convert_input{};
return convert_input(frg_input);
},
[&] (auto&&... cvt_frg_inputs) {
using ComputeOutput = ComputeFn<Array<ElementCompute, FragmentSize>>;
using ConvertOutput = NumericArrayConverter<ElementOutput, ElementCompute, FragmentSize, RoundStyle>;
ComputeOutput compute_output{};
ConvertOutput convert_output{};
if constexpr (cute::is_same_v<Arguments, EmptyArguments>) {
return convert_output(compute_output(cvt_frg_inputs...));
}
else {
return convert_output(compute_output(cvt_frg_inputs..., params));
}
}
);
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
return ConsumerStoreCallbacks(params);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Performance Optimized Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// beta * C + Z
template <
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle,
class InputScaleOp, // beta
class ElementSource, // C
class InputAddOp // Z
>
struct Sm90TreeVisitor<
Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle,
cute::void_t<decltype(declval<InputScaleOp>().is_zero())>>,
InputScaleOp,
Sm90SrcFetch<ElementSource>,
InputAddOp
> : Sm90VisitorImpl<
InputScaleOp,
Sm90SrcFetch<ElementSource>,
InputAddOp,
Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>
>
{
using Impl =
Sm90VisitorImpl<
InputScaleOp,
Sm90SrcFetch<ElementSource>,
InputAddOp,
Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>
>;
using Params = typename Impl::Params;
using SharedStorage = typename Impl::SharedStorage;
CUTLASS_HOST_DEVICE
Sm90TreeVisitor() {}
CUTLASS_HOST_DEVICE
Sm90TreeVisitor(
Params const& params,
SharedStorage const& shared_storage)
: Impl(params, shared_storage) {}
CUTLASS_DEVICE bool
is_producer_load_needed() const {
auto const& added_op = get<2>(Impl::ops);
return is_C_load_needed() || added_op.is_producer_load_needed();
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
auto const& scale_op = get<0>(Impl::ops);
auto const& src_op = get<1>(Impl::ops);
auto const& added_op = get<2>(Impl::ops);
return (not scale_op.is_zero() && src_op.is_C_load_needed()) || added_op.is_C_load_needed();
}
template <class CallbacksImpl>
struct ConsumerStoreCallbacks : CallbacksImpl {
CUTLASS_DEVICE
ConsumerStoreCallbacks(bool is_C_load_needed, CallbacksImpl&& impl)
: is_C_load_needed(is_C_load_needed), CallbacksImpl(cute::forward<CallbacksImpl>(impl)) { }
bool is_C_load_needed;
template <typename ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE Array<ElementOutput, FragmentSize>
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) {
Array frg_added = get<2>(CallbacksImpl::callbacks_tuple).visit(frg_acc, epi_v, epi_m, epi_n);
using ElementZ = typename decltype(frg_added)::Element;
using ConvertZ = NumericArrayConverter<ElementCompute, ElementZ, FragmentSize, RoundStyle>;
using ConvertI = NumericArrayConverter<ElementOutput, ElementCompute, FragmentSize, RoundStyle>;
ConvertZ convert_Z{};
ConvertI convert_I{};
Array frg_I = convert_Z(frg_added);
if (is_C_load_needed) {
Array frg_scalar = get<0>(CallbacksImpl::callbacks_tuple).visit(frg_acc, epi_v, epi_m, epi_n);
Array frg_source = get<1>(CallbacksImpl::callbacks_tuple).visit(frg_acc, epi_v, epi_m, epi_n);
using ElementX = typename decltype(frg_scalar)::Element;
using ElementY = typename decltype(frg_source)::Element;
using ConvertX = NumericArrayConverter<ElementCompute, ElementX, FragmentSize, RoundStyle>;
using ConvertY = NumericArrayConverter<ElementCompute, ElementY, FragmentSize, RoundStyle>;
using ComputeI = multiply_add<Array<ElementCompute, FragmentSize>>;
ConvertX convert_X{};
ConvertY convert_Y{};
ComputeI compute_I{};
frg_I = compute_I(convert_X(frg_scalar), convert_Y(frg_source), frg_I);
}
return convert_I(frg_I);
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
auto callbacks_tuple = Impl::template get_consumer_store_callbacks<ReferenceSrc>(args);
return ConsumerStoreCallbacks<decltype(callbacks_tuple)>(
is_C_load_needed(), std::move(callbacks_tuple));
}
};
// ReLU with aux bit tensor dReLU/dZ
// Aux(i) = Z(i) >= 0 ? 1 : 0
namespace detail {
// Placeholder node so we can retain standard EVT structure
template <class StrideMNL>
struct Sm90ReLUAuxStore : Sm90VisitorImpl<> {
struct SharedStorage {};
struct Arguments {
cutlass::uint1b_t* ptr_aux = nullptr;
StrideMNL dAux = {};
};
using Params = Arguments;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return cutlass::Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Sm90ReLUAuxStore() { }
CUTLASS_HOST_DEVICE
Sm90ReLUAuxStore(Params const& params, SharedStorage const& shared_storage) { }
};
} // namespace detail
// Specialization on the generic compute+aux EVT
template <
// Compute node
template <class> class Activation,
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle,
// Aux node
int Stages,
class EpilogueTile,
class StrideMNL,
class SmemLayoutAtom,
class CopyOpR2S,
int Alignment,
bool EnableNullptr,
// Input node
class InputOp
>
struct Sm90TreeVisitor<
Sm90Compute<Activation, ElementOutput, ElementCompute, RoundStyle,
cute::enable_if_t<cute::is_same_v<Activation<ElementCompute>, cutlass::epilogue::thread::ReLu<ElementCompute>> ||
cute::is_same_v<Activation<ElementCompute>, cutlass::epilogue::thread::Clamp<ElementCompute>> >>,
Sm90TreeVisitor<
Sm90AuxStore<
Stages,
EpilogueTile,
cutlass::uint1b_t,
RoundStyle,
StrideMNL,
SmemLayoutAtom,
CopyOpR2S,
Alignment,
EnableNullptr
>,
InputOp
>
> : Sm90VisitorImpl<
Sm90VisitorImpl<
InputOp,
detail::Sm90ReLUAuxStore<StrideMNL>
>,
Sm90Compute<Activation, ElementOutput, ElementCompute, RoundStyle>
>
{
using Impl =
Sm90VisitorImpl<
Sm90VisitorImpl<
InputOp,
detail::Sm90ReLUAuxStore<StrideMNL>
>,
Sm90Compute<Activation, ElementOutput, ElementCompute, RoundStyle>
>;
using Params = typename Impl::Params;
using SharedStorage = typename Impl::SharedStorage;
CUTLASS_HOST_DEVICE
Sm90TreeVisitor() {}
CUTLASS_HOST_DEVICE
Sm90TreeVisitor(Params const& params_, SharedStorage const& shared_storage)
: params(params_), Impl(params_, shared_storage) {}
Params const& params;
template <class RTensor, class GTensor, class CTensor, class ResidueMN, class CallbacksImpl>
struct ConsumerStoreCallbacks : CallbacksImpl {
CUTLASS_DEVICE
ConsumerStoreCallbacks(
RTensor&& tC_rAux,
GTensor&& tC_gAux,
CTensor tC_cAux,
ResidueMN residue_mn,
Params const& params,
CallbacksImpl&& impl)
: tC_rAux(cute::forward<RTensor>(tC_rAux)),
tC_gAux(cute::forward<GTensor>(tC_gAux)),
tC_cAux(tC_cAux),
residue_mn(residue_mn),
params(params),
CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {}
RTensor tC_rAux; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
GTensor tC_gAux; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
CTensor tC_cAux; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
ResidueMN residue_mn;
Params const& params;
template <typename ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE Array<ElementOutput, FragmentSize>
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) {
// Unpack callbacks + params
auto& [callbacks_input_aux, callbacks_compute] = CallbacksImpl::callbacks_tuple;
auto& [callbacks_input, callbacks_aux] = callbacks_input_aux.callbacks_tuple;
auto const& [params_input_aux, params_compute] = params;
auto const& [params_input, params_aux] = params_input_aux;
// Visit the input node
Array frg_input = callbacks_input.visit(frg_acc, epi_v, epi_m, epi_n);
// Compute activation + aux
using ElementInput = typename decltype(frg_input)::Element;
using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>;
using ConvertAux = PackPredicates<FragmentSize>;
using ComputeOutput = Activation<ElementCompute>;
using ConvertOutput = NumericArrayConverter<ElementOutput, ElementCompute, FragmentSize, RoundStyle>;
ConvertInput convert_input{};
ComputeOutput relu{};
ConvertAux convert_aux{};
ConvertOutput convert_output{};
Array frg_compute = convert_input(frg_input);
bool frg_aux[FragmentSize];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < FragmentSize; ++i) {
ElementCompute pre_relu = frg_compute[i];
if constexpr (cute::is_same_v<Activation<ElementCompute>, cutlass::epilogue::thread::Clamp<ElementCompute>>) {
frg_compute[i] = relu(frg_compute[i], params_compute);
}
else {
frg_compute[i] = relu(frg_compute[i]);
}
frg_aux[i] = frg_compute[i] == pre_relu;
}
static_assert(FragmentSize % 8 == 0, "Predicate vector must be byte-aligned");
Tensor tC_rAux_frg = recast<typename ConvertAux::result_type>(coalesce(tC_rAux(_,_,_,epi_m,epi_n))); // (EPI_V)
tC_rAux_frg(epi_v) = convert_aux(frg_aux);
return convert_output(frg_compute);
}
CUTLASS_DEVICE void
end() {
// Unpack callbacks + params
auto& [callbacks_input_aux, callbacks_compute] = CallbacksImpl::callbacks_tuple;
auto& [callbacks_input, callbacks_aux] = callbacks_input_aux.callbacks_tuple;
auto const& [params_input_aux, params_compute] = params;
auto const& [params_input, params_aux] = params_input_aux;
// Visit the input node
callbacks_input.end();
// Nullptr is no-op
if constexpr (EnableNullptr) {
if (params_aux.ptr_aux == nullptr) {
return;
}
}
// Copy vectorizes into byte-aligned stores
constexpr int V = cute::min(Alignment, decltype(max_common_vector(tC_rAux, tC_gAux))::value);
if constexpr (V > 0 && V % 8 == 0) {
using VecType = uint_bit_t<V>;
Tensor tC_rAux_vec = recast<VecType>(tC_rAux);
Tensor tC_gAux_vec = recast<VecType>(tC_gAux);
Tensor tC_cAux_vec = tC_cAux.compose(make_layout(Int<size(tC_rAux_vec)>{}, Int<V>{})); // only works if vector is logically sequential
auto predicate_fn = [&] (auto&&... coords) { return elem_less(tC_cAux_vec(coords...), residue_mn); };
copy_if(FunctionPredTensor(predicate_fn), tC_rAux_vec, tC_gAux_vec);
}
// sub-byte vectorization, must serialize threads
else {
// Assumes no inter-warp sharing of bytes (most copy layouts should satisfy this)
int lane_idx = canonical_lane_idx();
auto predicate_fn = [&] (auto&&... coords) { return elem_less(tC_cAux(coords...), residue_mn); };
CUTLASS_PRAGMA_NO_UNROLL
for (int i = 0; i < NumThreadsPerWarp; ++i) {
if (lane_idx == i) {
copy_if(FunctionPredTensor(predicate_fn), tC_rAux, tC_gAux);
}
__syncwarp();
}
}
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
// Unpack params
auto const& [params_input_aux, params_compute] = params;
auto const& [params_input, params_aux] = params_input_aux;
auto [M, N, K, L] = args.problem_shape_mnkl;
auto [m, n, k, l] = args.tile_coord_mnkl;
gmem_ptr ptr_aux = make_gmem_ptr(subbyte_iterator<cutlass::uint1b_t>(params_aux.ptr_aux));
Tensor mAux = make_tensor(ptr_aux, make_layout(make_shape(M,N,L), params_aux.dAux)); // (M,N,L)
Tensor gAux = local_tile(mAux, take<0,2>(args.tile_shape_mnk), make_coord(m,n,l)); // (CTA_M,CTA_N)
Tensor tC_gAux = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
gAux, args.epi_tile, args.tiled_copy, args.thread_idx);
Tensor tC_rAux = make_tensor<cutlass::uint1b_t>(shape(tC_gAux)); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
auto callbacks_impl = Impl::template get_consumer_store_callbacks<ReferenceSrc>(args);
return ConsumerStoreCallbacks<decltype(tC_rAux), decltype(tC_gAux), decltype(args.tCcD), decltype(args.residue_mn), decltype(callbacks_impl)>(
cute::move(tC_rAux), cute::move(tC_gAux), args.tCcD, args.residue_mn, params, cute::move(callbacks_impl));
}
};
// Aux load for uint1b_t
template <
int Stages,
class EpilogueTile,
class StrideMNL,
class SmemLayoutAtom,
class CopyOpS2R,
int Alignment,
bool EnableNullptr
>
struct Sm90AuxLoad<
Stages,
EpilogueTile,
cutlass::uint1b_t,
StrideMNL,
SmemLayoutAtom,
CopyOpS2R,
Alignment,
EnableNullptr
> {
static_assert(Alignment % 128 == 0, "sub-16B alignment not supported yet");
struct SharedStorage {};
struct Arguments {
cutlass::uint1b_t const* ptr_aux = nullptr;
cutlass::uint1b_t null_default = cutlass::uint1b_t(0);
StrideMNL dAux = {};
};
using Params = Arguments;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return cutlass::Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Sm90AuxLoad() { }
CUTLASS_HOST_DEVICE
Sm90AuxLoad(Params const& params, SharedStorage const&)
: params(params) { }
Params const params;
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return false;
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
return false;
}
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return EmptyProducerLoadCallbacks{};
}
template <class RTensor, class GTensor, class CTensor, class ResidueMN>
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
CUTLASS_DEVICE
ConsumerStoreCallbacks(RTensor&& tC_rAux_, GTensor&& tC_gAux_, CTensor tC_cAux_, ResidueMN residue_mn_, Params const& params_)
: tC_rAux(cute::forward<RTensor>(tC_rAux_)),
tC_gAux(cute::forward<GTensor>(tC_gAux_)),
tC_cAux(tC_cAux_),
residue_mn(residue_mn_),
params(params_) {}
RTensor tC_rAux; // (CPY,CPY_M,CPY_N,{EPI_M,EPI_N})
GTensor tC_gAux; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
CTensor tC_cAux; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
ResidueMN residue_mn;
Params const& params;
CUTLASS_DEVICE void
begin() {
if constexpr (decltype(cute::rank(tC_rAux))::value == 5) {
if constexpr (EnableNullptr) {
if (params.ptr_aux == nullptr) {
return;
}
}
constexpr int V = cute::min(Alignment, decltype(max_common_vector(tC_rAux, tC_gAux))::value);
if constexpr (V > 0) {
using VecType = uint_bit_t<V>;
Tensor tC_gAux_vec = recast<VecType>(tC_gAux);
Tensor tC_rAux_vec = recast<VecType>(tC_rAux);
Tensor tC_cAux_vec = tC_cAux.compose(make_layout(Int<size(tC_rAux_vec)>{}, Int<V>{})); // only works if vector is logically sequential
auto predicate_fn = [&] (auto&&... coords) { return elem_less(tC_cAux_vec(coords...), residue_mn); };
copy_if(FunctionPredTensor(predicate_fn), tC_gAux_vec, tC_rAux_vec);
}
else {
auto predicate_fn = [&] (auto&&... coords) { return elem_less(tC_cAux(coords...), residue_mn); };
copy_if(FunctionPredTensor(predicate_fn), tC_gAux, tC_rAux);
}
}
}
CUTLASS_DEVICE void
previsit(int epi_m, int epi_n, int load_iteration, bool is_producer_load_needed) {
if constexpr (decltype(cute::rank(tC_rAux))::value == 3) {
if constexpr (EnableNullptr) {
if (params.ptr_aux == nullptr) {
return;
}
}
auto predicate_fn = [&] (auto&&... coords) { return elem_less(tC_cAux(_,_,_,epi_m,epi_n)(coords...), residue_mn); };
copy_if(FunctionPredTensor(predicate_fn), tC_gAux(_,_,_,epi_m,epi_n), tC_rAux);
}
}
template <typename ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) {
using ElementRegister = typename remove_cvref_t<RTensor>::value_type;
if constexpr (decltype(cute::rank(tC_rAux))::value == 3) {
return recast<Array<ElementRegister, FragmentSize>>(coalesce(tC_rAux))(epi_v);
}
else {
return recast<Array<ElementRegister, FragmentSize>>(coalesce(tC_rAux(_,_,_,epi_m,epi_n)))(epi_v);
}
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
auto [M, N, K, L] = args.problem_shape_mnkl;
auto [m, n, k, l] = args.tile_coord_mnkl;
gmem_ptr ptr_aux = make_gmem_ptr(subbyte_iterator<cutlass::uint1b_t const>(params.ptr_aux));
Tensor mAux = make_tensor(ptr_aux, make_layout(make_shape(M,N,L), params.dAux)); // (M,N,L)
Tensor gAux = local_tile(mAux, take<0,2>(args.tile_shape_mnk), make_coord(m,n,l)); // (CTA_M,CTA_N)
Tensor tC_gAux = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
gAux, args.epi_tile, args.tiled_copy, args.thread_idx);
// If byte-unaligned vectorization, store in registers as uint32_t to reduce redundant pack+unpack instruction sequences
constexpr int V = decltype(max_common_vector(tC_gAux.layout(), make_layout(tC_gAux.shape())))::value;
Tensor tC_rAux = [&] () {
if constexpr (V % 8 != 0) {
return make_tensor<uint32_t>(take<0,3>(shape(tC_gAux))); // (CPY,CPY_M,CPY_N)
} else {
return make_tensor<cutlass::uint1b_t>(shape(tC_gAux)); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
}
}();
if constexpr (EnableNullptr) {
if (params.ptr_aux == nullptr) {
fill(tC_rAux, params.null_default);
}
}
return ConsumerStoreCallbacks<decltype(tC_rAux), decltype(tC_gAux), decltype(args.tCcD), decltype(args.residue_mn)>(
cute::move(tC_rAux), cute::move(tC_gAux), args.tCcD, args.residue_mn, params);
}
};
// dReLU specialization
template<
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle
>
struct Sm90Compute<
cutlass::epilogue::thread::dReLU,
ElementOutput,
ElementCompute,
RoundStyle
> : Sm90VisitorImpl<> {
using Sm90VisitorImpl<>::Sm90VisitorImpl;
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
template <typename ElementAccumulator, typename ElementInput, typename ElementAux, int FragmentSize>
CUTLASS_DEVICE Array<ElementOutput, FragmentSize>
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInput , FragmentSize> const& frg_input,
Array<ElementAux , FragmentSize> const& frg_aux) {
using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>;
using ComputeOutput = cutlass::epilogue::thread::dReLU<Array<ElementCompute, FragmentSize>>;
using ConvertOutput = NumericArrayConverter<ElementOutput, ElementCompute, FragmentSize, RoundStyle>;
ConvertInput convert_input{};
ComputeOutput compute_output{};
ConvertOutput convert_output{};
return convert_output(compute_output(convert_input(frg_input), frg_aux)); // don't convert frg_aux for dReLU
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
return ConsumerStoreCallbacks();
}
};
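// Scalar sketch of the rule the dReLU specialization above applies per fragment
// element: the aux bit recorded by the forward ReLU marks where the activation
// was pass-through, and the incoming gradient is propagated only there. This
// free function is illustrative only and is not referenced by the visitor.
template <class T>
CUTLASS_HOST_DEVICE
T drelu_reference(T grad, bool relu_was_passthrough) {
  return relu_was_passthrough ? grad : T(0);
}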
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::fusion
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/fusion/sm90_visitor_compute_tma_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/epilogue/fusion/sm90_visitor_compute_tma_warpspecialized.hpp",
"repo_id": "include",
"token_count": 11719
} | 24 |
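The visitor above records, for every output element, whether ReLU left the value unchanged, and packs those predicates into a uint1b_t auxiliary tensor, eight per byte. Below is a minimal host-side sketch of that packing step; the helper name pack_relu_predicates and the LSB-first bit order are illustrative assumptions, not part of CUTLASS.

#include <cstdint>

// Pack FragmentSize ReLU predicates into a bit vector, eight per byte, mirroring
// the byte-alignment requirement asserted in visit() (FragmentSize % 8 == 0).
// Bit order within a byte is assumed LSB-first for illustration.
template <int FragmentSize>
void pack_relu_predicates(bool const (&preds)[FragmentSize],
                          std::uint8_t (&bits)[FragmentSize / 8]) {
  static_assert(FragmentSize % 8 == 0, "Predicate vector must be byte-aligned");
  for (int byte = 0; byte < FragmentSize / 8; ++byte) {
    std::uint8_t packed = 0;
    for (int bit = 0; bit < 8; ++bit) {
      packed |= static_cast<std::uint8_t>(preds[byte * 8 + bit]) << bit;
    }
    bits[byte] = packed;
  }
}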
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Constructs a default epilogue for planar complex outputs.
This template reuses components for real-valued epilogues and applies them to planar complex
output matrices.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/arch/arch.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues.
template <
typename ThreadblockShape_,
typename WarpMma_,
typename OpcodeClass_,
typename ArchTag_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpiloguePlanarComplex;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues.
template <
typename ThreadblockShape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
arch::OpClassTensorOp,
arch::Sm70,
PartitionsK,
OutputOp_,
ElementsPerAccess> {
using RealEpilogue = DefaultEpilogueVoltaTensorOp<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
OutputOp_,
ElementsPerAccess
>;
using Epilogue = EpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
typename RealEpilogue::OutputTileIterator,
typename RealEpilogue::AccumulatorFragmentIterator,
typename RealEpilogue::WarpTileIterator,
typename RealEpilogue::SharedLoadIterator,
OutputOp_,
typename RealEpilogue::Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues.
template <
typename ThreadblockShape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
arch::OpClassTensorOp,
arch::Sm75,
PartitionsK,
OutputOp_,
ElementsPerAccess> {
using RealEpilogue = DefaultEpilogueTensorOp<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
OutputOp_,
ElementsPerAccess
>;
using Epilogue = EpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
typename RealEpilogue::OutputTileIterator,
typename RealEpilogue::AccumulatorFragmentIterator,
typename RealEpilogue::WarpTileIterator,
typename RealEpilogue::SharedLoadIterator,
OutputOp_,
typename RealEpilogue::Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues.
template <
typename ThreadblockShape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
arch::OpClassTensorOp,
arch::Sm80,
PartitionsK,
OutputOp_,
ElementsPerAccess> {
using RealEpilogue = DefaultEpilogueTensorOp<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
OutputOp_,
ElementsPerAccess
>;
using Epilogue = EpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
typename RealEpilogue::OutputTileIterator,
typename RealEpilogue::AccumulatorFragmentIterator,
typename RealEpilogue::WarpTileIterator,
typename RealEpilogue::SharedLoadIterator,
OutputOp_,
typename RealEpilogue::Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues.
template <
typename ThreadblockShape_,
typename WarpMmaOperator_,
typename ArchTag_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
arch::OpClassSimt,
ArchTag_,
PartitionsK,
OutputOp_,
ElementsPerAccess> {
using RealEpilogue = DefaultEpilogueSimt<
ThreadblockShape_,
WarpMmaOperator_,
OutputOp_,
ElementsPerAccess
>;
using Epilogue = EpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
typename RealEpilogue::OutputTileIterator,
typename RealEpilogue::AccumulatorFragmentIterator,
typename RealEpilogue::WarpTileIterator,
typename RealEpilogue::SharedLoadIterator,
OutputOp_,
typename RealEpilogue::Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_epilogue_planar_complex.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_epilogue_planar_complex.h",
"repo_id": "include",
"token_count": 2191
} | 25 |
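For context, the planar complex convention targeted by these defaults keeps the real and imaginary parts of the output in two separate planes of the same matrix rather than interleaving them per element. The following host-side sketch shows that layout under the assumption of a row-major plane and a constant imaginary stride; the struct name and member functions are illustrative.

#include <cstddef>
#include <vector>

// Planar complex storage: real plane followed by imaginary plane, separated by
// a fixed "imaginary stride" measured in elements.
struct PlanarComplexMatrix {
  int rows = 0;
  int cols = 0;
  std::vector<float> data;   // 2 * rows * cols elements

  PlanarComplexMatrix(int r, int c)
    : rows(r), cols(c), data(std::size_t(2) * r * c) {}

  std::size_t imaginary_stride() const { return std::size_t(rows) * cols; }
  float& real(int r, int c) { return data[std::size_t(r) * cols + c]; }
  float& imag(int r, int c) { return data[imaginary_stride() + std::size_t(r) * cols + c]; }
};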
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic subset of epilogue functionality for supporting StreamK decompositions
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/block_striped.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// StreamK epilogue functionality for cross-block accumulator fragment reduction
template <
typename Shape, ///< Shape of threadblock tile (concept: GemmShape)
int PartitionsK,
typename WarpMmaOperator, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
typename AccumulatorFragmentIterator> ///< Iterator for enumerating fragments within the per-thread tile of raw accumulators
class EpilogueBaseStreamK
{
protected:
/// The per-thread tile of raw accumulators
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpMmaOperator::Shape::kM,
Shape::kN / WarpMmaOperator::Shape::kN,
PartitionsK>;
/// Number of threads per block
static int const kBlockThreads = 32 * WarpCount::kCount;
/// Numerical accumulation element type
using ElementAccumulator = typename WarpMmaOperator::ElementC;
/// Fragment type used by the accumulator tile's fragment iterator
using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment;
public:
/// Number of AccumulatorTile fragments per thread
static int const kAccumulatorFragments = AccumulatorFragmentIterator::Policy::kIterations;
protected:
/// Number of AccumulatorTile fragments per block output tile
static int const kOutputTileFragments = kBlockThreads * kAccumulatorFragments;
/// Block-striped transfer utility for sharing AccumulatorFragment
using BlockStripedT = BlockStriped<kBlockThreads, AccumulatorFragment>;
/// AccumulatorFragment stride in the shared workspace between different peer blocks (each thread block can share accumulators for up to two block output tiles)
static const int kPeerFragmentStride = kOutputTileFragments * 2;
public:
/// Workspace bytes per thread block
  static size_t const kWorkspaceBytesPerBlock = sizeof(AccumulatorFragment) * kPeerFragmentStride;
public:
/// Thread index in the threadblock
int thread_idx;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueBaseStreamK(
int thread_idx) ///< ID of a thread within the threadblock
:
thread_idx(thread_idx)
{}
/// Aggregates the accumulator sets shared by peer blocks in the global workspace
CUTLASS_DEVICE
void reduce(
AccumulatorFragment &accum_fragment, ///< [out] sum of all shared accumulator fragments for these peer partials
int peer_idx_begin,
int peer_idx_end,
int reduce_fragment_idx,
void *workspace_ptr)
{
plus<AccumulatorFragment> add_fragments;
AccumulatorFragment *fragment_workspace = reinterpret_cast<AccumulatorFragment *>(workspace_ptr);
int fragment_offset = (peer_idx_begin * kPeerFragmentStride) + (reduce_fragment_idx * kBlockThreads);
// Load first peer fragment
BlockStripedT::load(accum_fragment, fragment_workspace + fragment_offset, this->thread_idx);
fragment_offset += kPeerFragmentStride; // Move to next peer
fragment_offset += kOutputTileFragments; // Move to the set of fragments for this peer's "non-started" output tile
// Reduce fragments from additional peers
#pragma unroll 2
for (; fragment_offset < peer_idx_end * kPeerFragmentStride; fragment_offset += kPeerFragmentStride)
{
// Load peer fragment
AccumulatorFragment addend_fragment;
BlockStripedT::load(addend_fragment, fragment_workspace + fragment_offset, this->thread_idx);
// Add peer fragment
accum_fragment = add_fragments(accum_fragment, addend_fragment);
}
}
/// Shares the accumulator set with peers in the global workspace
CUTLASS_DEVICE
void share(
int peer_idx,
void *workspace_ptr,
AccumulatorTile const &accumulators,
bool started_tile) ///< Whether this thread block computed the first work volume for the current output tile
{
AccumulatorFragment *fragment_workspace = reinterpret_cast<AccumulatorFragment *>(workspace_ptr);
int fragment_offset = peer_idx * kPeerFragmentStride;
if (!started_tile) {
// Move to the set of fragments for the "non-started" output tile
fragment_offset += kOutputTileFragments;
}
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
// Convert raw accumulator tile to fragments and store
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < kAccumulatorFragments; ++iter)
{
// Acquire reordered accumulator fragment
AccumulatorFragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
// Store accumulator fragment
BlockStripedT::store(fragment_workspace + fragment_offset, accum_fragment, this->thread_idx);
fragment_offset += kBlockThreads;
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_base_streamk.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_base_streamk.h",
"repo_id": "include",
"token_count": 2346
} | 26 |
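The workspace sizing and indexing rules above are easy to check on the host. The sketch below mirrors the same arithmetic; the member names echo, but are not, the constants in EpilogueBaseStreamK, and the values would normally come from the warp count and the accumulator fragment iterator.

#include <cstddef>

// Host-side sketch of the StreamK workspace indexing used by share() and reduce().
struct StreamKWorkspaceLayout {
  int block_threads;            // kBlockThreads
  int accumulator_fragments;    // kAccumulatorFragments
  std::size_t fragment_bytes;   // sizeof(AccumulatorFragment)

  int output_tile_fragments() const { return block_threads * accumulator_fragments; }
  int peer_fragment_stride()  const { return output_tile_fragments() * 2; }
  std::size_t workspace_bytes_per_block() const { return fragment_bytes * peer_fragment_stride(); }

  // Fragment index written by peer `peer_idx`; peers that did not start the
  // output tile append after the "started" set, as in share().
  int share_offset(int peer_idx, bool started_tile) const {
    return peer_idx * peer_fragment_stride() + (started_tile ? 0 : output_tile_fragments());
  }

  // First fragment read by reduce() for a given reduce_fragment_idx.
  int reduce_offset(int peer_idx_begin, int reduce_fragment_idx) const {
    return peer_idx_begin * peer_fragment_stride() + reduce_fragment_idx * block_threads;
  }
};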
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Visitor tree load operations for the CUTLASS 2x epilogue
*/
#pragma once
#include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp"
#include "cute/tensor.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::threadblock {
using namespace cute;
using namespace detail;
using X = Underscore;
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Elementwise Fetch Operations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// returns accumulator
struct VisitorAccFetch : VisitorImpl2x<> {
using VisitorImpl2x<>::VisitorImpl2x;
struct Callbacks : EmptyCallbacks {
template <class ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE Array<ElementAccumulator, FragmentSize>
visit(int iter_idx, int row_idx, int column_idx, int frg_idx, Array<ElementAccumulator, FragmentSize> const& frg_acc) {
return frg_acc;
}
};
template <class ProblemShape>
CUTLASS_DEVICE auto
get_callbacks(
gemm::GemmCoord threadblock_tile_offset,
int thread_idx,
ProblemShape problem_shape
) {
return Callbacks{};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Broadcast Load Operations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// Scalar broadcast
template<
class Element,
class StrideMNL = Stride<_0,_0,_0>,
int BroadcastCount = 1,
template <class> class ReductionFn = multiplies
>
struct VisitorScalarBroadcast {
static_assert(
(cute::is_same_v<StrideMNL, Stride<_0,_0,_0>>) || // scalar broadcast, e.g. alpha
(cute::is_same_v<StrideMNL, Stride<_0,_0,_1>>) ||
(cute::is_same_v<StrideMNL, Stride<_0,_0,int>>)); // batched scalar broadcast, e.g. per-batch alpha
struct SharedStorage { };
struct Arguments {
Element scalars[BroadcastCount] = {};
Element const* scalar_ptrs[BroadcastCount] = {};
StrideMNL dScalar = {};
};
using Params = Arguments;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
CUTLASS_HOST_DEVICE
VisitorScalarBroadcast() { }
CUTLASS_HOST_DEVICE
VisitorScalarBroadcast(Params const& params, SharedStorage const& shared_storage)
: params_ptr(¶ms) {
// Get the scalar for non-batched broadcast
if constexpr (cute::is_same_v<StrideMNL, Stride<_0,_0,_0>>) {
update_scalar();
}
}
Element scalar;
Params const* params_ptr;
struct Callbacks: EmptyCallbacks {
CUTLASS_DEVICE
Callbacks(Element scalar)
: scalar(scalar) {}
Element scalar;
template <class ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto // returns an Array
visit(int iter_idx, int row_idx, int column_idx, int frg_idx,
Array<ElementAccumulator, FragmentSize> const& frg_acc) {
Array<Element, FragmentSize> frg_scalar;
frg_scalar.fill(scalar);
return frg_scalar;
}
};
template <class ProblemShape>
CUTLASS_DEVICE auto
get_callbacks(
gemm::GemmCoord threadblock_tile_offset,
int thread_idx,
ProblemShape problem_shape
) {
// Get the scalar for batched broadcast
if constexpr (
cute::is_same_v<StrideMNL, Stride<_0,_0,_1>> ||
cute::is_same_v<StrideMNL, Stride<_0,_0,int>>) {
update_scalar(threadblock_tile_offset.k());
}
return Callbacks(scalar);
}
private:
CUTLASS_DEVICE void
update_scalar(int l_coord = 0) {
int l_offset = l_coord * size<2>(params_ptr->dScalar);
if (params_ptr->scalar_ptrs[0] != nullptr) {
scalar = params_ptr->scalar_ptrs[0][l_offset];
} else {
// batch stride is ignored for nullptr fallback
scalar = params_ptr->scalars[0];
}
// Do reduction over multiple broadcasts if necessary
ReductionFn<Element> reduction_fn;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < BroadcastCount; ++i) {
if (params_ptr->scalar_ptrs[i] != nullptr) {
scalar = reduction_fn(scalar, params_ptr->scalar_ptrs[i][l_offset]);
} else {
// batch stride is ignored for nullptr fallback
scalar = reduction_fn(scalar, params_ptr->scalars[i]);
}
}
}
};
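// A host-style sketch of the resolution rule implemented by update_scalar()
// above: each broadcast slot reads from its pointer when one is provided and
// falls back to the immediate otherwise, and slots past the first are folded in
// with the reduction functor. The free function below is illustrative only and
// is not used by the visitor.
template <class Element, int BroadcastCount, class ReductionFn>
CUTLASS_HOST_DEVICE
Element resolve_broadcast_scalar(
    Element const (&scalars)[BroadcastCount],
    Element const* const (&scalar_ptrs)[BroadcastCount],
    int l_offset,
    ReductionFn reduction_fn) {
  Element scalar = scalar_ptrs[0] ? scalar_ptrs[0][l_offset] : scalars[0];
  CUTLASS_PRAGMA_UNROLL
  for (int i = 1; i < BroadcastCount; ++i) {
    Element next = scalar_ptrs[i] ? scalar_ptrs[i][l_offset] : scalars[i];
    scalar = reduction_fn(scalar, next);
  }
  return scalar;
}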
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Elementwise Load Operations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template<
class ThreadMap,
class Element,
class StrideMNL
>
struct VisitorAuxLoad{
struct Arguments {
Element* ptr_aux = nullptr;
Element null_default = Element(0);
StrideMNL dAux = {};
};
using Params = Arguments;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
// Software pipeline stages
static const int Stages = ThreadMap::Stages;
struct SharedStorage {};
// Global load type
static int constexpr vec_bits = ThreadMap::kElementsPerAccess * sizeof_bits<Element>::value;
using VecType = uint_bit_t<cute::min(128, vec_bits)>;
static int constexpr VecLength = sizeof(VecType) / sizeof(Element);
CUTLASS_HOST_DEVICE
VisitorAuxLoad() { }
CUTLASS_HOST_DEVICE
VisitorAuxLoad(Params const& params, SharedStorage const& shared_storage)
: params_ptr(¶ms) { }
Params const* params_ptr;
template <class GTensor, class RTensor, class CTensor, class ProblemShape>
struct Callbacks : EmptyCallbacks {
CUTLASS_DEVICE
Callbacks(
GTensor&& tC_gAux,
RTensor&& tC_rAux,
CTensor&& tC_cAux,
ProblemShape problem_shape,
Params const* params_ptr
):
tC_gAux(cute::forward<GTensor>(tC_gAux)),
tC_rAux(cute::forward<RTensor>(tC_rAux)),
tC_cAux(cute::forward<CTensor>(tC_cAux)),
problem_shape(problem_shape),
params_ptr(params_ptr) { }
GTensor tC_gAux;
RTensor tC_rAux;
CTensor tC_cAux;
Params const* params_ptr;
ProblemShape problem_shape;
CUTLASS_DEVICE void
begin_step(int step_idx) {
clear(tC_rAux(_,_,_,step_idx%Stages));
auto src_v = filter(tC_gAux(_,_,_,step_idx));
auto coord_v = filter(tC_cAux(_,_,_,step_idx));
auto dst_v = filter(tC_rAux(_,_,_,step_idx%Stages));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(src_v); ++i) {
bool guard = elem_less(coord_v(i), problem_shape);
cutlass::arch::global_load<VecType, sizeof(VecType)>(dst_v(i), (void const*)&src_v(i), guard);
}
}
template <class ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto // returns an Array
visit(int iter_idx, int row_idx, int column_idx, int frg_idx,
Array<ElementAccumulator, FragmentSize> const& frg_acc) {
Tensor tC_rAux_frg = recast<Array<Element, FragmentSize>>(coalesce(tC_rAux(_,_,_,iter_idx%Stages)));
return tC_rAux_frg(frg_idx);
}
};
template <class ProblemShape>
CUTLASS_DEVICE auto
get_callbacks(
gemm::GemmCoord threadblock_tile_offset,
int thread_idx,
ProblemShape problem_shape
) {
Tensor mAux = make_tensor(
make_gmem_ptr(params_ptr->ptr_aux),
problem_shape,
params_ptr->dAux); // (M,N,L)
// VECTOR, FRAGMENT_COLUMN, FRAGMENT_ROW, ITERATION_ROW, ITERATION_GROUP, ITERATION_CLUSTER
Tensor tC_gAux = recast<VecType>(
group_modes<3,6>(ThreadMap::partition(mAux, thread_idx, threadblock_tile_offset)));
// VECTOR, FRAGMENT_COLUMN, FRAGMENT_ROW, Stages
Tensor tC_rAux = make_tensor<VecType>(
make_layout(flatten(make_shape(take<0,3>(tC_gAux.shape()), Int<Stages>{}))));
// Generate the pred tensor
Tensor cAux = make_identity_tensor(mAux.shape());
Tensor tC_cAux = outer_partition(
group_modes<3,6>(ThreadMap::partition(cAux, thread_idx, threadblock_tile_offset)),
Shape<Int<VecLength>>{},
(_0{})
);
return Callbacks<
decltype(tC_gAux), decltype(tC_rAux),
decltype(tC_cAux), ProblemShape>(
cute::move(tC_gAux),
cute::move(tC_rAux),
cute::move(tC_cAux),
problem_shape,
params_ptr
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Row vector broadcast
template<
class ThreadMap,
class Element,
class StrideMNL
>
struct VisitorRowBroadcast {
struct Arguments {
Element const* ptr_row = nullptr;
Element null_default = Element(0);
StrideMNL dRow = {};
};
using Params = Arguments;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
struct SharedStorage {};
// Global load type
static int constexpr vec_bits = ThreadMap::kElementsPerAccess * sizeof_bits<Element>::value;
using VecType = uint_bit_t<cute::min(128, vec_bits)>;
static int constexpr VecLength = sizeof(VecType) / sizeof(Element);
CUTLASS_HOST_DEVICE
VisitorRowBroadcast() { }
CUTLASS_HOST_DEVICE
VisitorRowBroadcast(Params const& params, SharedStorage const& shared_storage)
: params_ptr(¶ms) { }
Params const* params_ptr;
template <class GTensor, class RTensor, class CTensor, class ProblemShape>
struct Callbacks : EmptyCallbacks {
CUTLASS_DEVICE
Callbacks(
GTensor&& tC_gRow,
RTensor&& tC_rRow,
CTensor&& tC_cRow,
ProblemShape problem_shape,
Params const* params_ptr
):
tC_gRow(cute::forward<GTensor>(tC_gRow)),
tC_rRow(cute::forward<RTensor>(tC_rRow)),
tC_cRow(cute::forward<CTensor>(tC_cRow)),
n(get<1>(problem_shape)),
params_ptr(params_ptr) { }
GTensor tC_gRow;
RTensor tC_rRow;
CTensor tC_cRow;
Params const* params_ptr;
int n;
CUTLASS_DEVICE void
begin_epilogue() {
clear(tC_rRow);
auto src_v = filter(tC_gRow);
auto coord_v = filter(tC_cRow);
auto dst_v = filter(tC_rRow);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(src_v); ++i) {
bool guard = get<1>(coord_v(i)) < n;
cutlass::arch::global_load<VecType, sizeof(VecType)>(dst_v(i), (void const*)&src_v(i), guard);
}
}
template <class ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto // returns an Array
visit(int iter_idx, int row_idx, int column_idx, int frg_idx,
Array<ElementAccumulator, FragmentSize> const& frg_acc) {
Tensor rRow_frg = recast<Array<Element, FragmentSize>>(coalesce(tC_rRow));
return rRow_frg(column_idx);
}
};
template <class ProblemShape>
CUTLASS_DEVICE auto
get_callbacks(
gemm::GemmCoord threadblock_tile_offset,
int thread_idx,
ProblemShape problem_shape
) {
Tensor mRow = make_tensor(
make_gmem_ptr(params_ptr->ptr_row),
problem_shape,
params_ptr->dRow);
// VECTOR, FRAGMENT_COLUMN
Tensor tC_gRow = recast<VecType>(
ThreadMap::partition(mRow, thread_idx, threadblock_tile_offset)
)(_,_,_0{},_0{},_0{},_0{});
Tensor tC_rRow = make_tensor_like(tC_gRow);
// Generate the pred tensor
Tensor cRow = make_identity_tensor(mRow.shape());
Tensor tC_cRow = outer_partition(
ThreadMap::partition(cRow, thread_idx, threadblock_tile_offset)(_,_,_0{},_0{},_0{},_0{}),
Shape<Int<VecLength>>{},
(_0{})
);
return Callbacks<
decltype(tC_gRow), decltype(tC_rRow),
decltype(tC_cRow), ProblemShape>(
cute::move(tC_gRow),
cute::move(tC_rRow),
cute::move(tC_cRow),
problem_shape,
params_ptr
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Column vector broadcast
template<
class ThreadMap,
class Element,
class StrideMNL = Stride<_1,_0,_0>
>
struct VisitorColBroadcast {
struct Arguments {
Element const* ptr_col = nullptr;
Element null_default = Element(0);
StrideMNL dCol = {};
};
using Params = Arguments;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
struct SharedStorage { };
CUTLASS_HOST_DEVICE
VisitorColBroadcast() { }
CUTLASS_HOST_DEVICE
VisitorColBroadcast(Params const& params, SharedStorage const& shared_storage)
: params_ptr(¶ms) { }
Params const* params_ptr;
template <class GTensor, class RTensor, class CTensor, class ProblemShape>
struct Callbacks : EmptyCallbacks {
CUTLASS_DEVICE
Callbacks(
GTensor&& tC_gCol,
RTensor&& tC_rCol,
CTensor&& tC_cCol,
ProblemShape problem_shape,
Params const* params_ptr
):
tC_gCol(cute::forward<GTensor>(tC_gCol)),
tC_rCol(cute::forward<RTensor>(tC_rCol)),
tC_cCol(cute::forward<CTensor>(tC_cCol)),
m(get<0>(problem_shape)),
params_ptr(params_ptr) { }
GTensor tC_gCol;
RTensor tC_rCol;
CTensor tC_cCol;
Params const* params_ptr;
int m;
CUTLASS_DEVICE void
begin_epilogue() {
clear(tC_rCol);
Tensor pred = make_tensor<bool>(shape(tC_gCol));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(pred); ++i) {
pred(i) = get<0>(tC_cCol(i)) < m;
}
copy_if(pred, tC_gCol, tC_rCol);
}
template <class ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto // returns an Array
visit(int iter_idx, int row_idx, int column_idx, int frg_idx,
Array<ElementAccumulator, FragmentSize> const& frg_acc) {
Array<Element, FragmentSize> frg_col;
frg_col.fill(tC_rCol(row_idx,iter_idx));
return frg_col;
}
};
template <class ProblemShape>
CUTLASS_DEVICE auto
get_callbacks(
gemm::GemmCoord threadblock_tile_offset,
int thread_idx,
ProblemShape problem_shape
) {
Tensor mCol = make_tensor(
make_gmem_ptr(params_ptr->ptr_col),
problem_shape,
params_ptr->dCol);
// VECTOR, FRAGMENT_COLUMN, FRAGMENT_ROW, ITERATION_ROW, ITERATION_GROUP, ITERATION_CLUSTER
Tensor tC_gCol = group_modes<1,4>(
ThreadMap::partition(mCol, thread_idx, threadblock_tile_offset)(_0{},_0{},_,_,_,_));
Tensor tC_rCol = make_tensor_like(tC_gCol);
// Generate the pred tensor
Tensor cCol = make_identity_tensor(mCol.shape());
Tensor tC_cCol = group_modes<1,4>(
ThreadMap::partition(cCol, thread_idx, threadblock_tile_offset)(_0{},_0{},_,_,_,_));
return Callbacks<
decltype(tC_gCol), decltype(tC_rCol),
decltype(tC_cCol), ProblemShape>(
cute::move(tC_gCol),
cute::move(tC_rCol),
cute::move(tC_cCol),
problem_shape,
params_ptr
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::threadblock
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/fusion/visitor_load.hpp/0 | {
"file_path": "include/cutlass/epilogue/threadblock/fusion/visitor_load.hpp",
"repo_id": "include",
"token_count": 6834
} | 27 |
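The row and column broadcast visitors above guard every element they load with a bounds check against the problem extent, so partial tiles read only valid data and keep zeros elsewhere. The snippet below is a host-side analogue of that pattern with illustrative names; it is not part of the epilogue code.

#include <vector>

// Guarded row load: element c of the broadcast row is read only if c < n
// (the problem extent); out-of-bounds positions keep the cleared value,
// mirroring clear(tC_rRow) followed by the predicated global loads.
std::vector<float> load_row_guarded(float const* ptr_row, int n, int tile_n) {
  std::vector<float> frag(tile_n, 0.0f);
  for (int c = 0; c < tile_n; ++c) {
    if (c < n) {
      frag[c] = ptr_row[c];
    }
  }
  return frag;
}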
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops optimized for mixed-precision.
This assumes the shared memory tile is in a permuted layout which avoids bank conflicts on loading.
When the fragment is loaded into registers, it matches the row-major thread map assumed by
the predicated tile iterator writing to global memory.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load output tile from shared memory in epilogue.
///
/// Satisfies: ReadableTileIterator
///
template <
  typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Accumulator data type
int ElementSizeBits_, ///< Size of accumulator in bits
int OutputSizeBits_, ///< Size of output element in bits
int ElementsPerAccess, ///< Vector length of output vector
int ContiguousLanes, ///< Number of lanes in the warp writing to contiguous elements
/// in the global memory tensor
bool EightBitsOutputOrLess = (OutputSizeBits_ <= 8)
>
class SharedLoadIteratorMixed;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load output tile from shared memory in epilogue.
///
/// Satisfies: ReadableTileIterator
///
template <
  typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_ ///< Accumulator data type
>
class SharedLoadIteratorMixed<ThreadMap_, Element_, 32, 16, 8, 8, false> {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kAlignment = ThreadMap::kElementsPerAccess * sizeof_bits<Element_>::value / 8;
static int const kThreads = ThreadMap::kThreads;
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster *
ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<
Element,
ThreadMap::kElementsPerAccess,
kAlignment>;
/// Vector type used for SMEM loads
using LoadType = AlignedArray<
Element,
const_min(128 / sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess),
const_min(16, kAlignment)
>;
static int const kLoadsPerAccess = AccessType::kElements / LoadType::kElements;
private:
//
// Data members
//
/// Byte-level pointer
LoadType const *pointers_[kLoadsPerAccess];
/// Stride along adjacent rows in units of LoadType
int stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
SharedLoadIteratorMixed(
TensorRef ref,
int thread_idx
):
stride_((ref.stride(0) / LoadType::kElements)) {
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx);
// Initialize pointers
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] = reinterpret_cast<LoadType const *>(ref.data());
int col_idx = (thread_offset.column() / kElementsPerAccess) * kLoadsPerAccess;
int bank_offset = (col_idx * int(sizeof(LoadType)) / 128) % kLoadsPerAccess;
col_idx += (bank_offset + i) % kLoadsPerAccess;
pointers_[i] += thread_offset.row() * stride_ + col_idx;
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] += pointer_offset / LoadType::kElements;
}
}
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] +=
offset.row() * Shape::kRow * stride_ +
offset.column() * Shape::kColumn / LoadType::kElements;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int row_ptr_offset =
row * ThreadMap::Delta::kRow * stride_ +
group * ThreadMap::Delta::kGroup* stride_ +
cluster * ThreadMap::Delta::kCluster * stride_ +
pointer_offset / LoadType::kElements;
int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kLoadsPerAccess; ++v) {
int vector_idx = (column * ThreadMap::Delta::kColumn / kElementsPerAccess * kLoadsPerAccess);
LoadType const *memory_pointer = pointers_[v] + row_ptr_offset;
frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[vector_idx];
}
}
}
}
}
}
/// Set base smem address
CUTLASS_DEVICE
void set_smem_base_address(Index address) {}
/// Loads a fragment
CUTLASS_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for
/// int32_t x 16 => int8_t/int4b_t x 16 and
/// float x 16 => float_e4m3_t/float_e5m2_t x 16
template <
typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_,
int OutputSizeBits_ ///< Size of output element in bits
>
class SharedLoadIteratorMixed<ThreadMap_, Element_, 32, OutputSizeBits_, 16, 8, true> {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
static_assert(sizeof_bits<Element>::value == 32, "Element size in bits must be 32.");
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kAlignment = 16;
static int const kThreads = ThreadMap::kThreads;
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster *
ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<
Element,
16,
kAlignment>;
/// Vector type used for SMEM loads
using LoadType = AlignedArray<
Element,
4,
16
>;
static int const kLoadsPerAccess = 4;
private:
//
// Data members
//
/// Byte-level pointer
LoadType const *pointers_[kLoadsPerAccess];
/// Stride along adjacent rows in units of LoadType
int stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
SharedLoadIteratorMixed(
TensorRef ref,
int thread_idx
):
stride_((ref.stride(0) / LoadType::kElements)) {
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx);
// Initialize pointers
LoadType const *base_ptr = reinterpret_cast<LoadType const *>(ref.data()) + thread_offset.row() * stride_;
int lane_col_idx = thread_offset.column() / 16;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
int lane_offset = (lane_col_idx % 2) * 4 | ((lane_col_idx / 2) * 8) | ((lane_col_idx / 2) ^ i);
pointers_[i] = base_ptr + lane_offset;
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] += pointer_offset / LoadType::kElements;
}
}
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] +=
offset.row() * Shape::kRow * stride_ +
offset.column() * Shape::kColumn / LoadType::kElements;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int row_ptr_offset =
row * ThreadMap::Delta::kRow * stride_ +
group * ThreadMap::Delta::kGroup* stride_ +
cluster * ThreadMap::Delta::kCluster * stride_ +
pointer_offset / LoadType::kElements;
int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kLoadsPerAccess; ++v) {
LoadType const *memory_pointer = pointers_[v];
frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[row_ptr_offset];
}
}
}
}
}
}
/// Set base smem address
CUTLASS_DEVICE
void set_smem_base_address(Index address) {}
/// Loads a fragment
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for:
/// int32_t x 8 => int8_t/int4b_t x 8 and
/// float x 8 => float_e4m3_t/float_e5m2_t x 8
template <
typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_,
int OutputSizeBits_
>
class SharedLoadIteratorMixed<ThreadMap_, Element_, 32, OutputSizeBits_, 8, 8, true> {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
static_assert(sizeof_bits<Element>::value == 32, "Element size in bits must be 32.");
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kAlignment = 8;
static int const kThreads = ThreadMap::kThreads;
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster *
ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<
Element,
8,
kAlignment>;
/// Vector type used for SMEM loads
using LoadType = AlignedArray<
Element,
4,
16
>;
static int const kLoadsPerAccess = 2;
private:
//
// Data members
//
/// Byte-level pointer
LoadType const *pointers_[kLoadsPerAccess];
/// Stride along adjacent rows in units of LoadType
int stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
SharedLoadIteratorMixed(
TensorRef ref,
int thread_idx
):
stride_((ref.stride(0) / LoadType::kElements)) {
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx);
// Initialize pointers
LoadType const *base_ptr = reinterpret_cast<LoadType const *>(ref.data()) + thread_offset.row() * stride_;
int lane_col_idx = thread_offset.column() / 8;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
int lane_offset = (lane_col_idx % 8) * 2 | ((lane_col_idx / 4) ^ i);
pointers_[i] = base_ptr + lane_offset;
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] += pointer_offset / LoadType::kElements;
}
}
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] +=
offset.row() * Shape::kRow * stride_ +
offset.column() * Shape::kColumn / LoadType::kElements;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int row_ptr_offset =
row * ThreadMap::Delta::kRow * stride_ +
group * ThreadMap::Delta::kGroup* stride_ +
cluster * ThreadMap::Delta::kCluster * stride_ +
pointer_offset / LoadType::kElements;
int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kLoadsPerAccess; ++v) {
LoadType const *memory_pointer = pointers_[v];
frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[row_ptr_offset];
}
}
}
}
}
}
/// Set base smem address
CUTLASS_DEVICE
void set_smem_base_address(Index address) {}
/// Loads a fragment
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/shared_load_iterator_mixed.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/shared_load_iterator_mixed.h",
"repo_id": "include",
"token_count": 6682
} | 28 |
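Each specialization above sizes its per-thread Fragment as the product of the thread map's iteration counts and the access width. The constexpr sketch below shows that bookkeeping with illustrative iteration counts; real values come from the OutputTileThreadMap.

// Fragment element count = column x row x group x cluster iterations, each
// iteration moving kElementsPerAccess accumulator elements.
constexpr int kColumnIters       = 2;
constexpr int kRowIters          = 2;
constexpr int kGroupIters        = 1;
constexpr int kClusterIters      = 1;
constexpr int kElementsPerAccess = 8;

constexpr int kFragmentElements =
    kColumnIters * kRowIters * kGroupIters * kClusterIters * kElementsPerAccess;
static_assert(kFragmentElements == 32, "each thread holds 32 accumulator elements in this example");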
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/detail/layout.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/transform/collective/sm90_wgmma_transpose.hpp"
#include "cutlass/trace.h"
#include "cutlass/detail/collective.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cute/arch/copy_sm90.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/atom/copy_traits_sm90_tma.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/tensor_predicate.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/trace.h"
#include "cutlass/detail/collective.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
// WarpSpecialized Mainloop that source A operand from registers
template <
int Stages,
class ClusterShape,
class KernelSchedule,
class TileShape_,
class ElementAOptionalTuple,
class StrideA_,
class ElementBOptionalTuple,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm90TmaGmmaRmemAWarpSpecializedMixedInput<Stages, ClusterShape, KernelSchedule>,
TileShape_,
ElementAOptionalTuple,
StrideA_,
ElementBOptionalTuple,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
private:
template <class PointerType>
static constexpr auto
get_logical_ptr(PointerType const* ptr) {
if constexpr (cute::sizeof_bits_v<PointerType> < 8) {
return subbyte_iterator<PointerType const>(ptr);
}
else {
return ptr;
}
}
enum class ConversionMode {
DirectConvert,
ConvertAndScale,
ConvertAndScaleWithZero
};
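  // Illustrative mapping (not part of the original header) between the element tuple supplied
  // for the quantized operand and the conversion mode selected below; the concrete types are
  // only examples, assuming A is the narrow operand:
  //   ElementAOptionalTuple = cute::tuple<cutlass::int4b_t>                             -> DirectConvert
  //   ElementAOptionalTuple = cute::tuple<cutlass::int4b_t, cutlass::half_t>            -> ConvertAndScale
  //   ElementAOptionalTuple = cute::tuple<cutlass::int4b_t, cutlass::half_t, half_t>    -> ConvertAndScaleWithZero
  // Slot 0 is the operand type, slot 1 the optional scale type, and slot 2 the optional zero
  // type, as extracted by deduce_mixed_width_dtype_t below.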
using ScaleA = detail::deduce_mixed_width_dtype_t<1, ElementAOptionalTuple>;
using ScaleB = detail::deduce_mixed_width_dtype_t<1, ElementBOptionalTuple>;
using ZeroA = detail::deduce_mixed_width_dtype_t<2, ElementAOptionalTuple>;
using ZeroB = detail::deduce_mixed_width_dtype_t<2, ElementBOptionalTuple>;
public:
//
// Type Aliases
//
using DispatchPolicy = MainloopSm90TmaGmmaRmemAWarpSpecializedMixedInput<Stages, ClusterShape, KernelSchedule>;
using TileShape = TileShape_;
static_assert(cute::is_tuple<ElementAOptionalTuple>::value ^ cute::is_tuple<ElementBOptionalTuple>::value,
"Either A OR B must be a tuple. It must take the from {ElementOperand, [ElementScale], [ElementZero]}. Inputs in [] are optional.");
using ElementA = detail::deduce_mixed_width_dtype_t<0, ElementAOptionalTuple>;
using ElementB = detail::deduce_mixed_width_dtype_t<0, ElementBOptionalTuple>;
static constexpr bool IsATransformed = cute::is_tuple<ElementAOptionalTuple>::value;
using ElementScale = cute::conditional_t<IsATransformed, ScaleA, ScaleB>;
using ElementZero = cute::conditional_t<IsATransformed, ZeroA, ZeroB>;
// For cases where we can't have a void type, we can use this to allow the code to compile when the scale / zero is void.
using NonVoidElementScale = cute::conditional_t<cute::is_void_v<ElementScale>, float, ElementScale>;
using NonVoidElementZero = cute::conditional_t<cute::is_void_v<ElementZero>, float, ElementZero>;
using StrideA = StrideA_;
using StrideB = StrideB_;
// These are always MN major
using StrideScale = cute::Stride<cute::Int<1>, int64_t, int64_t>;
// For cases where we can't have a void scale, we can use this to allow the code to compile when the scale is void.
using NonVoidStrideScale = cute::conditional_t<cute::is_void_v<StrideScale>, cute::Stride<_1, int64_t, int64_t>, StrideScale>;
static_assert((IsATransformed && cutlass::gemm::detail::is_k_major<StrideA>()) ||
(!IsATransformed && cutlass::gemm::detail::is_k_major<StrideB>()),
"The transformed type must be K-major.");
static_assert(( IsATransformed && (sizeof(ElementB) == 2)) ||
(!IsATransformed && (sizeof(ElementA) == 2)) ||
(cutlass::gemm::detail::is_k_major<StrideA>() &&
cutlass::gemm::detail::is_k_major<StrideB>()),
"The unscaled element must be 2 bytes OR both inputs must be K-major");
static_assert(cutlass::gemm::detail::is_mn_major<NonVoidStrideScale>(),
"Scale must be MN major [Col Major if A is scaled, Row Major if B is scaled].");
using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{}));
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using GmemTiledCopyScale = cute::SM90_TMA_LOAD;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
// Scale layout atom set after swapping.
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using SmemCopyAtomScale = Copy_Atom<cute::DefaultCopy, NonVoidElementScale>;
// We must ensure the type to be scaled goes to RF
static constexpr bool SwapAB = !IsATransformed;
using InternalSmemLayoutAtomA = cute::conditional_t<!SwapAB, SmemLayoutAtomA, SmemLayoutAtomB>;
using InternalSmemLayoutAtomB = cute::conditional_t<!SwapAB, SmemLayoutAtomB, SmemLayoutAtomA>;
using InternalSmemCopyAtomA = cute::conditional_t<!SwapAB, SmemCopyAtomA, SmemCopyAtomB>;
using InternalSmemCopyAtomB = cute::conditional_t<!SwapAB, SmemCopyAtomB, SmemCopyAtomA>;
// TMA converts f32 input to tf32 when copying from GMEM to SMEM
  // For all other types, cast to a size-equivalent uint type to avoid any rounding by TMA.
static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>;
static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>;
using ConvertedElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>;
using ConvertedElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>;
using RealInternalElementA = cute::conditional_t<!SwapAB, ElementA, ElementB>;
using RealInternalElementB = cute::conditional_t<!SwapAB, ElementB, ElementA>;
using InternalElementA = cute::conditional_t<!SwapAB, ConvertedElementA, ConvertedElementB>;
using InternalElementB = cute::conditional_t<!SwapAB, ConvertedElementB, ConvertedElementA>;
using InternalStrideA = cute::conditional_t<!SwapAB, StrideA, StrideB>;
using InternalStrideB = cute::conditional_t<!SwapAB, StrideB, StrideA>;
using TransformA = TransformA_;
using TransformB = TransformB_;
using InternalTransformA = cute::conditional_t<!SwapAB, TransformA, TransformB>;
using InternalTransformB = cute::conditional_t<!SwapAB, TransformB, TransformA>;
static constexpr int IsSubbyteA = cute::sizeof_bits_v<InternalElementA> < 8;
using TmaElementA = cute::conditional_t<IsSubbyteA, uint8_t, InternalElementA>;
using ArchTag = typename DispatchPolicy::ArchTag;
using MainloopPipeline = cutlass::PipelineTmaAsync<
DispatchPolicy::Stages>;
using PipelineState = cutlass::PipelineState<DispatchPolicy::Stages>;
using PipelineParams = typename MainloopPipeline::Params;
using SmemLayoutAtomScale = Layout<Shape<decltype(cute::shape<0>(InternalSmemLayoutAtomA{})), cute::Int<1>>>;
using ScaleTileShape = decltype(make_shape(shape<0>(TileShape{}), shape<1>(SmemLayoutAtomScale{})));
static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(rank(SmemLayoutAtomScale{}) == 2, "SmemLayoutAtomScale must be rank 2");
  static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomScale{})) == 0, "SmemLayoutAtomScale must evenly divide the tile shape in M.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomScale{})) == 0, "SmemLayoutAtomScale must evenly divide tile k shape.");
// Tile along modes in a way that maximizes the TMA box size.
using SmemLayoutA = decltype(tile_to_shape(
InternalSmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,InternalStrideA>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
using SmemLayoutB = decltype(tile_to_shape(
InternalSmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,InternalStrideB>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
// It is assumed that the scales and zero-points share the same smem layout
using SmemLayoutScale = decltype(tile_to_shape(
SmemLayoutAtomScale{},
make_shape(shape<0>(ScaleTileShape{}), shape<1>(ScaleTileShape{}), Int<Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,NonVoidStrideScale>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more.");
static_assert(not cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value &&
cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value,
"MMA atom must source A from rmem and B operand from smem_desc for this mainloop.");
static_assert(cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>,
"GmemTiledCopy - invalid SM90 TMA copy atom specified.");
static_assert(cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>,
"GmemTiledCopy - invalid SM90 TMA copy atom specified.");
  // The scale smem layout is restricted to a single row of scales per mainloop iteration.
  // To relax this, we would need to handle loading more than 1 row of scales for every main loop iteration,
  // and we must also handle updating the pipeline transaction bytes on the fly.
  // NOTE: Deleting this assertion without the required changes will cause the code to hang.
static_assert(size<1>(SmemLayoutAtomScale{}) == 1, "size<1>(SmemLayoutAtomScale) must be 1.");
private:
static constexpr ConversionMode
get_conversion_mode() {
if constexpr (cute::is_void_v<ElementScale>) {
return ConversionMode::DirectConvert;
}
else if constexpr (cute::is_void_v<ElementZero>) {
return ConversionMode::ConvertAndScale;
}
else {
return ConversionMode::ConvertAndScaleWithZero;
}
}
static constexpr ConversionMode KernelConversionMode = get_conversion_mode();
static constexpr bool ModeHasScales = KernelConversionMode == ConversionMode::ConvertAndScale ||
KernelConversionMode == ConversionMode::ConvertAndScaleWithZero;
static constexpr auto
elements_per_smem_scale() {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
return 0;
}
else if constexpr (ModeHasScales) {
return cute::cosize_v<SmemLayoutScale>;
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Type not handled in scale smem allocation.");
}
}
static constexpr auto
elements_per_smem_zero() {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert ||
KernelConversionMode == ConversionMode::ConvertAndScale ) {
return 0;
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
return cute::cosize_v<SmemLayoutScale>;
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Type not handled in scale smem allocation.");
}
}
  // These methods use some of the public members of the class. For that reason, we define them after the public section.
static constexpr uint32_t
compute_tma_transaction_bytes() {
constexpr uint32_t a_bytes = cutlass::bits_to_bytes(size<0>(SmemLayoutA{}) * size<1>(SmemLayoutA{}) * static_cast<uint32_t>(cute::sizeof_bits_v<InternalElementA>));
constexpr uint32_t b_bytes = cutlass::bits_to_bytes(size<0>(SmemLayoutB{}) * size<1>(SmemLayoutB{}) * static_cast<uint32_t>(cute::sizeof_bits_v<InternalElementB>));
constexpr uint32_t baseline_bytes = a_bytes + b_bytes;
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
return baseline_bytes;
}
else if constexpr (ModeHasScales) {
constexpr uint32_t scale_tx_bytes = cutlass::bits_to_bytes(size<0>(SmemLayoutScale{}) * size<1>(SmemLayoutScale{}) * static_cast<uint32_t>(cute::sizeof_bits_v<ElementScale>));
static_assert(scale_tx_bytes % 128 == 0, "Each scale stage must be 128B aligned."); // required by TMA
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
return baseline_bytes + scale_tx_bytes;
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
// Scale and zero share smem layout
constexpr uint32_t zero_tx_bytes = cutlass::bits_to_bytes(size<0>(SmemLayoutScale{}) * size<1>(SmemLayoutScale{}) * static_cast<uint32_t>(cute::sizeof_bits_v<ElementZero>));
static_assert(zero_tx_bytes % 128 == 0, "Each zero stage must be 128B aligned."); // required by TMA
return baseline_bytes + scale_tx_bytes + zero_tx_bytes;
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Type not handled in tma transaction bytes computation.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Type not handled in tma transaction bytes computation.");
}
}
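  // Worked example (illustrative only; the tile and types are hypothetical) of the
  // transaction-byte computation above for a (128,64,64) CTA tile with an int4 A operand,
  // fp16 B operand, fp16 scales, and no zero-points:
  //   a_bytes     = bits_to_bytes(128 * 64 *  4) = 4096   (per stage)
  //   b_bytes     = bits_to_bytes( 64 * 64 * 16) = 8192   (per stage)
  //   scale_bytes = bits_to_bytes(128 *  1 * 16) =  256   (per stage)
  //   TmaTransactionBytes = 4096 + 8192 + 256 = 12544
  // The 128B-alignment static_asserts above are satisfied since 256 % 128 == 0.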
public:
static constexpr size_t SmemAlignmentA = cutlass::detail::alignment_for_swizzle(SmemLayoutA{});
static constexpr size_t SmemAlignmentB = cutlass::detail::alignment_for_swizzle(SmemLayoutB{});
// Just pick the max alignment of A and B since it is required to be at least 128B
static constexpr size_t SmemAlignmentScale = cute::max(SmemAlignmentA, SmemAlignmentB);
static_assert(SmemAlignmentA >= 128 and SmemAlignmentB >= 128, "Require at least 128B alignment");
struct SharedStorage
{
static constexpr int scale_elements = elements_per_smem_scale();
static constexpr int zero_elements = elements_per_smem_zero();
struct TensorStorage : cute::aligned_struct<cute::max(SmemAlignmentA, SmemAlignmentB)> {
cute::ArrayEngine<RealInternalElementA, cute::cosize_v<SmemLayoutA>> smem_A;
cute::ArrayEngine<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>> smem_B;
cute::ArrayEngine<NonVoidElementScale, scale_elements> smem_scale;
cute::ArrayEngine<NonVoidElementZero, zero_elements> smem_zero;
} tensors;
using PipelineStorage = typename MainloopPipeline::SharedStorage;
PipelineStorage pipeline;
};
using TensorStorage = typename SharedStorage::TensorStorage;
using PipelineStorage = typename SharedStorage::PipelineStorage;
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A = nullptr;
StrideA dA{};
ElementB const* ptr_B = nullptr;
StrideB dB{};
ElementScale const* ptr_S = nullptr;
NonVoidStrideScale dS{};
int group_size = 0;
ElementZero const* ptr_Z = nullptr;
uint32_t mma_promotion_interval = 4;
};
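  // Host-side usage sketch (illustrative, not part of the original header). The pointer and
  // stride variable names are hypothetical; the field order follows the struct above, and
  // mma_promotion_interval keeps its default:
  //
  //   Arguments args{
  //     ptr_quantized_A, stride_A,       // narrow operand (e.g. int4), K-major
  //     ptr_B,           stride_B,       // wide operand (e.g. fp16)
  //     ptr_scales,      stride_scale,   // one scale per group_size elements along K
  //     /*group_size=*/128,
  //     ptr_zeros                        // nullptr unless ConvertAndScaleWithZero is used
  //   };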
// Device side kernel params
struct Params {
private:
using Outer = CollectiveMma<DispatchPolicy, TileShape_,
ElementAOptionalTuple, StrideA_,
ElementBOptionalTuple, StrideB_,
TiledMma_,
GmemTiledCopyA_, SmemLayoutAtomA_, SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_, SmemLayoutAtomB_, SmemCopyAtomB_,
TransformB_>;
public:
// Assumption: StrideA is congruent with Problem_MK
using TMA_A = decltype(make_tma_copy<TmaElementA>(
GmemTiledCopyA{},
make_tensor(Outer::get_logical_ptr(static_cast<InternalElementA const*>(nullptr)), repeat_like(InternalStrideA{}, int32_t(0)), InternalStrideA{}),
SmemLayoutA{}(_,_,cute::Int<0>{}),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{}))); // mcast along N mode for this M load, if any
using TMA_Scale = decltype(make_tma_copy(
GmemTiledCopyScale{},
make_tensor(Outer::get_logical_ptr(static_cast<NonVoidElementScale const*>(nullptr)), repeat_like(NonVoidStrideScale{}, int32_t(0)), NonVoidStrideScale{}),
SmemLayoutScale{}(_,_,cute::Int<0>{}),
ScaleTileShape{},
_1{})); // mcast along N mode for this M load, if any. Scale is ALWAYS loaded with A for RF kernel
using TMA_Zero = decltype(make_tma_copy(
GmemTiledCopyScale{},
make_tensor(Outer::get_logical_ptr(static_cast<NonVoidElementZero const*>(nullptr)), repeat_like(NonVoidStrideScale{}, int32_t(0)), NonVoidStrideScale{}),
SmemLayoutScale{}(_,_,cute::Int<0>{}),
ScaleTileShape{},
_1{})); // mcast along N mode for this M load, if any. Scale is ALWAYS loaded with A for RF kernel
// Assumption: StrideB is congruent with Problem_NK
using TMA_B = decltype(make_tma_copy(
GmemTiledCopyB{},
make_tensor(Outer::get_logical_ptr(static_cast<InternalElementB const*>(nullptr)), repeat_like(InternalStrideB{}, int32_t(0)), InternalStrideB{}),
SmemLayoutB{}(_,_,cute::Int<0>{}),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{}))); // mcast along M mode for this N load, if any
TMA_A tma_load_a;
TMA_B tma_load_b;
TMA_Scale tma_load_scale;
TMA_Zero tma_load_zero;
int64_t scale_k;
int group_size;
};
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
(void) workspace;
// Optionally append 1s until problem shape is rank-4 (MNKL), in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
if constexpr (SwapAB) {
M = get<1>(problem_shape_MNKL);
N = get<0>(problem_shape_MNKL);
}
InternalElementA const* ptr_A;
InternalStrideA dA;
InternalElementB const* ptr_B;
InternalStrideB dB;
if constexpr (not SwapAB) {
ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_A);
ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_B);
dA = args.dA;
dB = args.dB;
}
else {
ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_B);
ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_A);
dA = args.dB;
dB = args.dA;
}
Tensor tensor_a = make_tensor(get_logical_ptr(ptr_A), make_layout(make_shape(M,K,L), dA));
Tensor tensor_b = make_tensor(get_logical_ptr(ptr_B), make_layout(make_shape(N,K,L), dB));
typename Params::TMA_A tma_load_a = make_tma_copy<TmaElementA>(
GmemTiledCopyA{},
tensor_a,
SmemLayoutA{}(_,_,cute::Int<0>{}),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{})); // mcast along N mode for this M load, if any
typename Params::TMA_B tma_load_b = make_tma_copy(
GmemTiledCopyB{},
tensor_b,
SmemLayoutB{}(_,_,cute::Int<0>{}),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{})); // mcast along M mode for this N load, if any
typename Params::TMA_Scale tma_load_scale;
typename Params::TMA_Zero tma_load_zero;
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
return { tma_load_a, tma_load_b, tma_load_scale, tma_load_zero, 0, 0 };
}
else if constexpr (ModeHasScales) {
auto scale_k = (K + args.group_size - 1) / args.group_size;
ElementScale const* ptr_S = args.ptr_S;
StrideScale dS = args.dS;
Tensor tensor_scale = make_tensor(get_logical_ptr(ptr_S), make_layout(make_shape(M,scale_k,L), dS));
tma_load_scale = make_tma_copy(
GmemTiledCopyScale{},
tensor_scale,
SmemLayoutScale{}(_,_,cute::Int<0>{}),
ScaleTileShape{},
_1{}); // mcast along N mode for this M load, if any
if constexpr(KernelConversionMode == ConversionMode::ConvertAndScale) {
return { tma_load_a, tma_load_b, tma_load_scale, tma_load_zero, scale_k, args.group_size };
}
else if constexpr(KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
Tensor tensor_zero = make_tensor(get_logical_ptr(args.ptr_Z), make_layout(make_shape(M,scale_k,L), dS));
tma_load_zero = make_tma_copy(
GmemTiledCopyScale{},
tensor_zero,
SmemLayoutScale{}(_,_,cute::Int<0>{}),
ScaleTileShape{},
_1{}); // mcast along N mode for this M load, if any
return { tma_load_a, tma_load_b, tma_load_scale, tma_load_zero, scale_k, args.group_size };
} else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in to_underlying_arguments.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in to_underlying_arguments.");
}
}
template<class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
constexpr int tma_alignment_bits = 128;
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
constexpr int min_tma_aligned_elements_A = tma_alignment_bits / cutlass::sizeof_bits<ElementA>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_A>(cute::make_shape(M,K,L), StrideA{});
constexpr int min_tma_aligned_elements_B = tma_alignment_bits / cutlass::sizeof_bits<ElementB>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_B>(cute::make_shape(N,K,L), StrideB{});
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
implementable = implementable && (args.ptr_S == nullptr);
implementable = implementable && (args.ptr_Z == nullptr);
}
else if constexpr (ModeHasScales) {
const int scale_mn = SwapAB ? N : M;
const int scale_k = (K + args.group_size - 1) / args.group_size;
constexpr int min_tma_aligned_elements_scale = tma_alignment_bits / cutlass::sizeof_bits<ElementScale>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_scale>(cute::make_shape(scale_mn,scale_k,L), StrideScale{});
implementable = implementable && (args.group_size == K || ((args.group_size % size<2>(TileShape{})) == 0));
implementable = implementable && args.group_size != 0;
implementable = implementable && (args.ptr_S != nullptr);
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
implementable = implementable && (args.ptr_Z == nullptr);
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
constexpr int min_tma_aligned_elements_zero = tma_alignment_bits / cutlass::sizeof_bits<ElementZero>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_zero>(cute::make_shape(scale_mn,scale_k,L), StrideScale{});
implementable = implementable && (args.ptr_Z != nullptr);
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in can_implement.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in can_implement.");
}
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
}
return implementable;
}
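  // Illustrative check of the group_size constraint enforced above (values are hypothetical):
  // with K == 512 and a threadblock tile K of 64,
  //   group_size == 512 (a single scale group spanning all of K) and group_size == 128 pass,
  //   group_size == 96 is rejected because 96 % 64 != 0 and 96 != K.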
static constexpr int K_PIPE_MAX = DispatchPolicy::Stages;
static constexpr uint32_t TmaTransactionBytes = compute_tma_transaction_bytes();
/// Issue Tma Descriptor Prefetch -- ideally from a single thread for best performance
CUTLASS_DEVICE
static void prefetch_tma_descriptors(Params const& mainloop_params) {
cute::prefetch_tma_descriptor(mainloop_params.tma_load_a.get_tma_descriptor());
cute::prefetch_tma_descriptor(mainloop_params.tma_load_b.get_tma_descriptor());
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
// Nothing extra to do
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
cute::prefetch_tma_descriptor(mainloop_params.tma_load_scale.get_tma_descriptor());
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
cute::prefetch_tma_descriptor(mainloop_params.tma_load_scale.get_tma_descriptor());
cute::prefetch_tma_descriptor(mainloop_params.tma_load_zero.get_tma_descriptor());
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in TMA prefetch.");
}
}
/// Set up the data needed by this collective for load and mma.
  /// Returns a tuple of tensors. The collective and the kernel layer have the contract that the
  /// returned tuple must contain at least two elements, with the first two elements being:
/// gA_mkl - The tma tensor, A after a local tile so it has shape (BLK_M,BLK_K,m,k,l)
/// gB_nkl - The tma tensor, B after a local tile so it has shape (BLK_N,BLK_K,n,k,l)
/// The rest of the tensors can be specified as needed by this collective.
template <class ProblemShape_MNKL>
CUTLASS_DEVICE auto
load_init(ProblemShape_MNKL const& problem_shape_MNKL, Params const& mainloop_params) const {
using X = Underscore;
// Separate out problem shape for convenience
auto [M,N,K,L] = problem_shape_MNKL;
// TMA requires special handling of strides to deal with coord codomain mapping
// Represent the full tensors -- get these from TMA
Tensor mA_mkl = mainloop_params.tma_load_a.get_tma_tensor(make_shape(M,K,L)); // (m,k,l)
Tensor mB_nkl = mainloop_params.tma_load_b.get_tma_tensor(make_shape(N,K,L)); // (n,k,l)
// Make tiled views, defer the slice
Tensor gA_mkl = local_tile(mA_mkl, TileShape{}, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l)
Tensor gB_nkl = local_tile(mB_nkl, TileShape{}, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l)
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
return cute::make_tuple(gA_mkl, gB_nkl);
}
else if constexpr (ModeHasScales) {
auto scale_k = mainloop_params.scale_k;
Tensor mS_mkl = mainloop_params.tma_load_scale.get_tma_tensor(make_shape(M,scale_k,L)); // (m,scale_k,l)
Tensor gS_mkl = local_tile(mS_mkl, ScaleTileShape{}, make_coord(_,_)); // (BLK_M,BLK_Scale_K,m,scale_k,l)
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
return cute::make_tuple(gA_mkl, gB_nkl, gS_mkl);
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
Tensor mZ_mkl = mainloop_params.tma_load_zero.get_tma_tensor(make_shape(M,scale_k,L)); // (m,scale_k,l)
Tensor gZ_mkl = local_tile(mZ_mkl, ScaleTileShape{}, make_coord(_,_)); // (BLK_M,BLK_Scale_K,m,scale_k,l)
return cute::make_tuple(gA_mkl, gB_nkl, gS_mkl, gZ_mkl);
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in load_init.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in load_init.");
}
}
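  // Illustrative summary (not part of the original header) of the tuple returned above:
  //   DirectConvert            -> (gA_mkl, gB_nkl)
  //   ConvertAndScale          -> (gA_mkl, gB_nkl, gS_mkl)
  //   ConvertAndScaleWithZero  -> (gA_mkl, gB_nkl, gS_mkl, gZ_mkl)
  // This matches the arity checks performed at the top of load().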
/// Perform a collective-scoped matrix multiply-accumulate
/// Producer Perspective
/// This overload gets triggered when we have scales.
template <
class... Ts,
class KTileIterator, class BlockCoord
>
CUTLASS_DEVICE void
load(
Params const& mainloop_params,
MainloopPipeline pipeline,
PipelineState smem_pipe_write,
cute::tuple<Ts...> const& load_inputs,
BlockCoord const& blk_coord,
KTileIterator k_tile_iter, int k_tile_count,
int thread_idx,
uint32_t block_rank_in_cluster,
TensorStorage& shared_tensors) {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
static_assert(sizeof... (Ts) == 2, "Direct convert needs two inputs");
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
static_assert(sizeof... (Ts) == 3, "Scaled convert needs three inputs");
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
static_assert(sizeof... (Ts) == 4, "Scaled and zero convert needs four inputs");
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in TMA load.");
}
int lane_predicate = cute::elect_one_sync();
if (lane_predicate) {
Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.begin()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB_ = make_tensor(make_smem_ptr(shared_tensors.smem_B.begin()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE)
Tensor sB = as_position_independent_swizzle_tensor(sB_); // (BLK_N,BLK_K,PIPE)
//
// Prepare the TMA loads for A, B and Scales
//
constexpr uint32_t cluster_shape_x = get<0>(ClusterShape());
uint2 cluster_local_block_id = {block_rank_in_cluster % cluster_shape_x, block_rank_in_cluster / cluster_shape_x};
Tensor gA_mkl = get<0>(load_inputs);
Tensor gB_nkl = get<1>(load_inputs);
auto block_tma_a = mainloop_params.tma_load_a.get_slice(cluster_local_block_id.y);
auto block_tma_b = mainloop_params.tma_load_b.get_slice(cluster_local_block_id.x);
// Partition the inputs based on the current block coordinates.
auto [m_coord, n_coord, k_coord, l_coord] = blk_coord;
Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k)
// Applies the mapping from block_tma_a
Tensor tAgA = block_tma_a.partition_S(gA); // (TMA,TMA_M,TMA_K,k)
Tensor tAsA = block_tma_a.partition_D(sA); // (TMA,TMA_M,TMA_K,PIPE)
Tensor tBgB = block_tma_b.partition_S(gB); // (TMA,TMA_N,TMA_K,k)
Tensor tBsB = block_tma_b.partition_D(sB); // (TMA,TMA_N,TMA_K,PIPE)
uint16_t mcast_mask_a = 0;
uint16_t mcast_mask_b = 0;
uint16_t mcast_mask_s = 0;
// Issue TmaLoads
// Maps the tile -> block, value
if constexpr (cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int n = 0; n < size<1>(block_layout); ++n) {
mcast_mask_a |= (uint16_t(1) << block_layout(cluster_local_block_id.x,n,Int<0>{}));
}
}
if constexpr (cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int m = 0; m < size<0>(block_layout); ++m) {
mcast_mask_b |= (uint16_t(1) << block_layout(m,cluster_local_block_id.y,Int<0>{}));
}
}
auto extra_input_partitions = partition_extra_tma_inputs(mainloop_params, load_inputs, shared_tensors, cluster_local_block_id, m_coord, l_coord);
// Mainloop
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 0; --k_tile_count) {
// LOCK smem_pipe_write for _writing_
pipeline.producer_acquire(smem_pipe_write);
//
// Copy gmem to smem for *k_tile_iter
//
using BarrierType = typename MainloopPipeline::ProducerBarrierType;
BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_write);
int write_stage = smem_pipe_write.index();
copy(mainloop_params.tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,write_stage));
copy(mainloop_params.tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,write_stage));
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
// Nothing extra to do.
}
else if constexpr (ModeHasScales) {
auto tSgS = get<0>(extra_input_partitions);
auto tSsS = get<1>(extra_input_partitions);
            // Temporary factor which determines which k tile the scales are reloaded from gmem for. Needed so we
            // don't modify tma transaction bytes on the fly.
            // We must do a ceiling divide here to correctly handle the case where group_size == K. In that case,
            // we don't require that K is a multiple of the threadblock tile K.
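            // Example (illustrative values): with group_size == K == 512 and tile K == 64, ReloadFactor == 8
            // and scale_load_k stays 0 for all 8 k tiles; with group_size == 128, ReloadFactor == 2 and a new
            // scale row is fetched every second k tile.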
const int ReloadFactor = (mainloop_params.group_size + size<2>(TileShape{}) - 1) / size<2>(TileShape{});
const int scale_load_k = *k_tile_iter / ReloadFactor; // This will always be 0 when group_size == K.
copy(mainloop_params.tma_load_scale.with(*tma_barrier, mcast_mask_s), tSgS(_,_,_,scale_load_k), tSsS(_,_,_,write_stage));
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
// Nothing extra to do
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
auto tZgZ = get<2>(extra_input_partitions);
auto tZsZ = get<3>(extra_input_partitions);
copy(mainloop_params.tma_load_zero.with(*tma_barrier, mcast_mask_s), tZgZ(_,_,_,scale_load_k), tZsZ(_,_,_,write_stage));
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled for TMA copy op.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled for TMA copy op.");
}
++k_tile_iter;
// Advance smem_pipe_write
++smem_pipe_write;
}
}
}
/// Perform a Producer Epilogue to prevent early exit of blocks in a Cluster
CUTLASS_DEVICE void
load_tail(MainloopPipeline pipeline, PipelineState smem_pipe_write) {
int lane_predicate = cute::elect_one_sync();
// Issue the epilogue waits
if (lane_predicate) {
/* This helps avoid early exit of blocks in Cluster
* Waits for all stages to either be released (all
* Consumer UNLOCKs), or if the stage was never used
* then would just be acquired since the phase was
* still inverted from make_producer_start_state
*/
pipeline.producer_tail(smem_pipe_write);
}
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Consumer Perspective
template <
class FrgTensorC
>
CUTLASS_DEVICE void
mma(MainloopPipeline pipeline,
PipelineState smem_pipe_read,
FrgTensorC& accum,
int k_tile_count,
int thread_idx,
TensorStorage& shared_tensors,
Params const& mainloop_params) {
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "InternalSmemLayoutAtomA must be rank 2.");
static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "InternalSmemLayoutAtomB must be rank 2.");
static_assert(!cute::is_void_v<InternalSmemCopyAtomA>,
"SM90 GMMA mainloops must specify a non-void copy atom for RF sourced instructions.");
static_assert(cute::is_void_v<InternalSmemCopyAtomB>,
"SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
// Obtain warp index
int warp_idx = canonical_warp_idx_sync();
[[maybe_unused]] int warp_group_thread_idx = thread_idx % 128;
Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.begin()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.begin()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
//
// Define C accumulators and A/B partitioning
//
TiledMma tiled_mma;
auto thread_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCsA = thread_mma.partition_A(sA);
// Allocate fragments and descriptors
Tensor tCrA_mma = thread_mma.partition_fragment_A(sA(_,_,Int<0>{})); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCrA_load = make_fragment_like<RealInternalElementA>(tCrA_mma);
Tensor tCsB = thread_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE)
Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE)
//
// Copy Atom A retiling
//
auto smem_tiled_copy_A = make_tiled_copy_A(InternalSmemCopyAtomA{}, tiled_mma);
auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(warp_group_thread_idx);
Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA_load); // (CPY,CPY_M,CPY_K)
// Compute the max vector length that can be used to copy A. This will match the vector width of the
// conversions used. It helps by allowing the compiler to convert using the same register that was used
// to load the data from smem. This significantly reduces the need to move data among registers.
// Note that this is correct even if copy fails to vectorize, since the granularity at which we perform
// the conversion does not impact correctness.
using A_CPY_VEC = decltype(max_common_vector(tCsA, tCrA_copy_view));
// Partition of thread -> shared and thread -> RF
auto partitioned_extra_info = partition_extra_mma_info(thread_mma, shared_tensors);
auto copy_partitions_extra_info = retile_extra_mma_info(tiled_mma, partitioned_extra_info, warp_group_thread_idx);
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K
CUTE_STATIC_ASSERT_V(size<1>(tCrA_mma) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K
CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
//
// PIPELINED MAIN LOOP
//
    // We release buffers to producer warps (dma load) with some mmas in flight
PipelineState smem_pipe_release = smem_pipe_read;
tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;
warpgroup_fence_operand(accum);
constexpr int K_BLOCK_MAX = size<2>(tCrA_load);
ConsumerToken barrier_token = {BarrierStatus::WaitAgain};
// first k tile
{
barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
pipeline.consumer_wait(smem_pipe_read, barrier_token);
int read_stage = smem_pipe_read.index();
++smem_pipe_read;
barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
// copy smem->rmem for A operand
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, 0, read_stage);
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, 0);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block) {
if (k_block < K_BLOCK_MAX - 1) {
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, k_block + 1, read_stage);
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, k_block + 1);
}
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA_mma(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
}
--k_tile_count;
if (k_tile_count > 0) {
// Wait for K_BLOCK_MAX - 1 to be in flight to ensure that it is safe to overwrite the A registers for the first mma.
warpgroup_wait<K_BLOCK_MAX - 1>();
pipeline.consumer_wait(smem_pipe_read, barrier_token);
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, 0, smem_pipe_read.index());
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, 0);
}
}
if (k_tile_count == 0) {
return;
}
warpgroup_fence_operand(accum);
// Mainloop GMMAs
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 1; --k_tile_count) {
//
// Compute on k_tile
//
int read_stage = smem_pipe_read.index();
++smem_pipe_read;
warpgroup_fence_operand(accum);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block) {
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA_mma(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
warpgroup_wait<K_BLOCK_MAX - 1>();
if (k_block == K_BLOCK_MAX - 1) {
// We have K_BLOCK_MAX - 1 GMMA instructions pending for this stage, so we can release prior barrier
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
if (k_block == 0) {
barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
}
if (k_block == K_BLOCK_MAX - 1) {
pipeline.consumer_wait(smem_pipe_read, barrier_token);
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, 0, smem_pipe_read.index());
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, 0);
}
else {
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, k_block + 1, read_stage);
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, k_block + 1);
}
}
warpgroup_fence_operand(accum);
}
warpgroup_fence_operand(accum);
{
//
// Compute on k_tile
//
int read_stage = smem_pipe_read.index();
warpgroup_fence_operand(accum);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block) {
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA_mma(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
warpgroup_wait<K_BLOCK_MAX - 1>();
if (k_block == K_BLOCK_MAX - 1) {
// release prior barrier
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
if (k_block < K_BLOCK_MAX - 1) {
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, k_block + 1, read_stage);
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, k_block + 1);
}
}
}
warpgroup_fence_operand(accum);
}
/// Perform a Consumer Epilogue to release all buffers
CUTLASS_DEVICE void
mma_tail(MainloopPipeline pipeline, PipelineState smem_pipe_release, int k_tile_count) {
// Prologue GMMAs
int prologue_mma_count = 1;
k_tile_count -= prologue_mma_count;
smem_pipe_release.advance(k_tile_count);
// Wait on all GMMAs to complete
warpgroup_wait<0>();
for (int count = 0; count < prologue_mma_count; ++count) {
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
}
private:
/// Utilities for any additional inputs inside of the TMA load
template <class... Ts>
CUTLASS_DEVICE
auto partition_extra_tma_inputs(
Params const& mainloop_params,
cute::tuple<Ts...> const& load_inputs,
TensorStorage& shared_tensors,
uint2 const& cluster_local_block_id,
int const m_coord,
int const l_coord) {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
return cute::tuple{};
}
else if constexpr (ModeHasScales) {
Tensor sS = make_tensor(make_smem_ptr(shared_tensors.smem_scale.begin()), SmemLayoutScale{}); // (BLK_M,BLK_K,PIPE)
Tensor gS_mkl = get<2>(load_inputs);
auto block_tma_s = mainloop_params.tma_load_scale.get_slice(cluster_local_block_id.y);
Tensor gS = gS_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor tSgS = block_tma_s.partition_S(gS); // (TMA,TMA_M,TMA_K,k)
Tensor tSsS = block_tma_s.partition_D(sS); // (TMA,TMA_M,TMA_K,PIPE)
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
return cute::make_tuple(tSgS, tSsS);
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
Tensor sZ = make_tensor(make_smem_ptr(shared_tensors.smem_zero.begin()), SmemLayoutScale{}); // (BLK_M,BLK_K,PIPE)
Tensor gZ_mkl = get<3>(load_inputs);
auto block_tma_z = mainloop_params.tma_load_zero.get_slice(cluster_local_block_id.y);
Tensor gZ = gZ_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor tZgZ = block_tma_z.partition_S(gZ); // (TMA,TMA_M,TMA_K,k)
Tensor tZsZ = block_tma_z.partition_D(sZ); // (TMA,TMA_M,TMA_K,PIPE)
return cute::make_tuple(tSgS, tSsS, tZgZ, tZsZ);
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled for input partitioning.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled for input partitioning.");
}
}
/// Utilities for partitioning extra inputs for loading from smem in the mainloop.
template <class ThreadMma>
CUTLASS_DEVICE
auto partition_extra_mma_info(
ThreadMma const& thread_mma,
TensorStorage& shared_tensors) {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
      // nothing to do
return cute::tuple{};
}
else if constexpr (ModeHasScales) {
Tensor sS = make_tensor(make_smem_ptr(shared_tensors.smem_scale.begin()), SmemLayoutScale{}); // (BLK_M,BLK_SCALE_K,PIPE)
Tensor tCsS = thread_mma.partition_A(sS);
Tensor tCrS = make_tensor<ElementScale>(thread_mma.partition_fragment_A(sS(_,_,Int<0>{})).shape());
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
return cute::make_tuple(tCsS, tCrS);
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
Tensor sZ = make_tensor(make_smem_ptr(shared_tensors.smem_zero.begin()), SmemLayoutScale{}); // (BLK_M,BLK_SCALE_K,PIPE)
Tensor tCsZ = thread_mma.partition_A(sZ);
Tensor tCrZ = make_tensor<ElementZero>(thread_mma.partition_fragment_A(sZ(_,_,Int<0>{})).shape());
return cute::make_tuple(tCsS, tCrS, tCsZ, tCrZ);
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
/// Returns the tiled copy and copy views for the extra inputs.
template <class TiledMma, class... Ts>
CUTLASS_DEVICE
auto retile_extra_mma_info(
TiledMma const& tiled_mma,
cute::tuple<Ts...>& partitioned_extra_info,
int const warp_group_thread_idx) {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
      // nothing to do
return cute::tuple{};
}
else if constexpr (ModeHasScales) {
auto smem_tiled_copy_S = make_tiled_copy_A(SmemCopyAtomScale{}, tiled_mma);
auto smem_thr_copy_S = smem_tiled_copy_S.get_thread_slice(warp_group_thread_idx);
Tensor tCrS_copy_view = smem_thr_copy_S.retile_D(cute::get<1>(partitioned_extra_info)); // (CPY,CPY_M,CPY_K)
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
return cute::make_tuple(smem_tiled_copy_S, tCrS_copy_view);
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
Tensor tCrZ_copy_view = smem_thr_copy_S.retile_D(cute::get<3>(partitioned_extra_info)); // (CPY,CPY_M,CPY_K)
return cute::make_tuple(smem_tiled_copy_S, tCrS_copy_view, tCrZ_copy_view);
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
/// Utilities to copy A and extra inputs from smem to RF
template <class SmemTiledCopyA,
class TensorASmemView,
class TensorACopyView,
class... Ts,
class... Us
>
CUTLASS_DEVICE
void copy_A_and_extra_info(
SmemTiledCopyA const& smem_tiled_copy_A,
TensorASmemView const& tCsA,
TensorACopyView& tCrA_copy_view,
cute::tuple<Ts...> const& partitioned_mma_extra_info,
cute::tuple<Us...> const& tiled_copy_and_views,
int k_block,
int read_stage) {
copy(smem_tiled_copy_A, tCsA(_,_,k_block,read_stage), tCrA_copy_view(_,_,k_block));
if (k_block == 0) {
// We are starting a new k-tile so copy the scale
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
// nothing to do
}
else if constexpr (ModeHasScales) {
auto smem_tiled_copy_S = cute::get<0>(tiled_copy_and_views);
auto tCrS_copy_view = cute::get<1>(tiled_copy_and_views);
auto tCsS = cute::get<0>(partitioned_mma_extra_info);
copy(smem_tiled_copy_S, tCsS(_,_,k_block,read_stage), tCrS_copy_view(_,_,k_block));
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
// Nothing extra to do
} else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
auto tCsZ = cute::get<2>(partitioned_mma_extra_info);
auto tCrZ_copy_view = cute::get<2>(tiled_copy_and_views);
copy(smem_tiled_copy_S, tCsZ(_,_,k_block,read_stage), tCrZ_copy_view(_,_,k_block));
} else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
}
/// Utilities to transform A.
template <class TCrA_load,
int VectorWidthA,
class TCrA_mma,
class... Ts>
CUTLASS_DEVICE
void transform_A_kblock(
TCrA_load const& tCrA_load,
cute::Int<VectorWidthA> vec_A,
TCrA_mma& tCrA_mma,
cute::tuple<Ts...> const& partitioned_extra_info,
int const k_block) {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
transform_internal_A(tCrA_load(_, _, k_block), vec_A, tCrA_mma(_, _, k_block));
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
auto tCrS = cute::get<1>(partitioned_extra_info);
transform_internal_A(tCrA_load(_, _, k_block), vec_A, make_fragment_like<ElementScale>(tCrA_mma)(_, _, k_block), tCrS(_, _, 0), tCrA_mma(_, _, k_block));
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
auto tCrS = cute::get<1>(partitioned_extra_info);
auto tCrZ = cute::get<3>(partitioned_extra_info);
transform_internal_A(tCrA_load(_, _, k_block),
vec_A,
make_fragment_like<ElementScale>(tCrA_mma)(_, _, k_block),
tCrS(_, _, 0),
tCrZ(_, _, 0),
make_fragment_like<ElementScale>(tCrZ)(_, _, 0),
tCrA_mma(_, _, k_block));
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "No A data is loaded.");
}
}
/// Utilities for transforming the A operand prior to issuing tensorcore math.
template <class EngineIn,
class EngineOut,
class TensorLayout,
int ConversionVectorWidth = cosize_v<TensorLayout>>
CUTLASS_DEVICE void
convert_tensor(
Tensor<EngineIn,TensorLayout> const& in,
Tensor<EngineOut,TensorLayout>& out,
cute::Int<ConversionVectorWidth> width = {}) {
/// This is an element-wise conversion where we expect both tensors to have the same layout.
/// As a result, we can cast as a cutlass array to use the fast numeric converters without
/// worrying about indexing into the layout.
constexpr int N = cosize_v<TensorLayout>;
/// The inputs must be backed by registers & be statically sized.
static_assert(is_rmem<EngineIn>::value, "Input tensor for A conversion must come from registers");
static_assert(is_rmem<EngineOut>::value, "Output tensor for A conversion must come from registers");
static_assert(is_static_v<TensorLayout>, "Tensor layout for the conversion must be static");
static_assert(cosize_v<TensorLayout> == size(TensorLayout{}), "Cosize and size of the layout must be equal.");
static_assert(N % ConversionVectorWidth == 0, "Conversion vector width must divide cosize of the tensor layout.");
using SrcType = typename EngineIn::value_type;
using DstType = typename EngineOut::value_type;
using SrcArray = cutlass::Array<SrcType, ConversionVectorWidth>;
using DstArray = cutlass::Array<DstType, ConversionVectorWidth>;
constexpr cutlass::FloatRoundStyle RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
using Converter = cutlass::NumericArrayConverter<DstType, SrcType, ConversionVectorWidth, RoundStyle>;
constexpr int NumIterations = N / ConversionVectorWidth;
for (int ii = 0; ii < NumIterations; ++ii) {
SrcArray const* src_array_ptr = reinterpret_cast<SrcArray const*>(raw_pointer_cast(in.data())) + ii;
DstArray* dst_array_ptr = reinterpret_cast<DstArray*>(raw_pointer_cast(out.data())) + ii;
*dst_array_ptr = Converter::convert(*src_array_ptr);
}
}
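  // Illustrative sizing (not part of the original header): converting a fragment of 32 elements
  // with ConversionVectorWidth == 8 performs NumIterations == 4 calls to NumericArrayConverter
  // over Array<_, 8> chunks; with the default width (the full cosize) the whole fragment is
  // converted in a single call.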
template <class EngineIn,
class EngineOut,
class TensorLayout,
int A_VectorConversionWidth>
CUTLASS_DEVICE void
transform_internal_A(
Tensor<EngineIn,TensorLayout>&& in,
cute::Int<A_VectorConversionWidth> a_vec_width,
Tensor<EngineOut,TensorLayout>&& out) {
convert_tensor(in, out, a_vec_width);
}
template <class EngineIn,
class EngineInputBuffer,
class EngineScale,
class EngineOut,
class TensorLayout,
int A_VectorConversionWidth>
CUTLASS_DEVICE void
transform_internal_A(
Tensor<EngineIn,TensorLayout>&& in,
cute::Int<A_VectorConversionWidth> a_vec_width,
Tensor<EngineInputBuffer,TensorLayout>&& converted_inputs,
Tensor<EngineScale,TensorLayout>&& scales,
Tensor<EngineOut,TensorLayout>&& out) {
static_assert(cute::is_same_v<typename EngineInputBuffer::value_type, typename EngineScale::value_type>,
"Type of the engine input buffer must equal the scale buffer");
// First, we upcast the inputs to the scale type
convert_tensor(in, converted_inputs, a_vec_width);
// Apply scales and broadcast across inputs, store in converted_inputs
cute::transform(converted_inputs, scales, converted_inputs, cute::multiplies{});
// Finally, we convert the scaled inputs to the mma type.
convert_tensor(converted_inputs, out);
}
template <class EngineIn,
class EngineInputBuffer,
class EngineScale,
class EngineZero,
class EngineZeroBuffer,
class EngineOut,
class TensorLayout,
int A_VectorConversionWidth>
CUTLASS_DEVICE void
transform_internal_A(
Tensor<EngineIn,TensorLayout>&& in,
cute::Int<A_VectorConversionWidth> a_vec_width,
Tensor<EngineInputBuffer,TensorLayout>&& converted_inputs,
Tensor<EngineScale,TensorLayout>&& scales,
Tensor<EngineZero,TensorLayout>&& zeros,
Tensor<EngineZeroBuffer,TensorLayout>&& converted_zeros,
Tensor<EngineOut,TensorLayout>&& out) {
static_assert(cute::is_same_v<typename EngineInputBuffer::value_type, typename EngineScale::value_type>,
"Type of the engine input buffer must equal the scale buffer");
static_assert(cute::is_same_v<typename EngineZeroBuffer::value_type, typename EngineScale::value_type>,
"Type of the engine zero buffer must equal the scale buffer");
// First, we upcast the inputs to the scale type
convert_tensor(in, converted_inputs, a_vec_width);
convert_tensor(zeros, converted_zeros);
// Apply scales and broadcast across inputs, store in converted_inputs
cute::transform(converted_inputs, scales, converted_inputs, cute::multiplies{});
cute::transform(converted_inputs, converted_zeros, converted_inputs, cute::plus{});
// Finally, we convert the scaled inputs to the mma type.
convert_tensor(converted_inputs, out);
}
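  // Illustrative restatement (not part of the original header) of the overloads above: the A
  // operand is dequantized as
  //   A_mma = convert_to_mma_type( convert_to_scale_type(A_quant) * scale + zero )
  // where the "* scale" and "+ zero" terms are applied only in the ConvertAndScale and
  // ConvertAndScaleWithZero modes, respectively.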
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/collective/sm90_mma_tma_gmma_rs_warpspecialized_mixed_input.hpp/0 | {
"file_path": "include/cutlass/gemm/collective/sm90_mma_tma_gmma_rs_warpspecialized_mixed_input.hpp",
"repo_id": "include",
"token_count": 26780
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for GEMM performing a reduction over K partitions in parallel.
*/
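/*
  Illustrative usage sketch (the element types, layouts, and names below are
  assumptions for the example, not requirements of this header):

    using Gemm = cutlass::gemm::device::GemmSplitKParallel<
        float, cutlass::layout::ColumnMajor,    // A
        float, cutlass::layout::ColumnMajor,    // B
        float, cutlass::layout::ColumnMajor>;   // C / D

    Gemm::Arguments args(
        {M, N, K},
        ref_A, ref_B, ref_C, ref_D,
        {alpha, beta},            // epilogue parameters
        split_k_slices);          // number of K partitions reduced in parallel

    size_t workspace_bytes = Gemm::get_workspace_size(args);
    // ... allocate workspace_bytes of device memory into `workspace` ...

    Gemm gemm_op;
    if (gemm_op.initialize(args, workspace) == cutlass::Status::kSuccess) {
      gemm_op();   // launches the partitioned GEMM followed by the reduction kernel
    }
*/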
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/gemm.h"
#include "cutlass/gemm/kernel/default_gemm_splitk_parallel.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/reduction/kernel/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
////////////////////////////////////////////////////////////////////////////////
/*!
  Gemm device-level operator performing a parallel reduction over K partitions.
*/
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_ = arch::Sm70,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
    /// Conversion operator applied to partial accumulator results stored to the workspace
typename ConvertScaledOp_ = cutlass::epilogue::thread::Convert<
ElementAccumulator_,
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementAccumulator_,
ElementAccumulator_>::EpilogueOutputOp::kCount,
ElementAccumulator_>,
/// Reduction operator
typename ReductionOp_ = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator_, typename EpilogueOutputOp_::ElementAccumulator,
EpilogueOutputOp_::kCount>,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
threadblock::GemmSplitKHorizontalThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int kAlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int kAlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator>
class GemmSplitKParallel {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ConvertScaledOp = ConvertScaledOp_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ReductionOp = ReductionOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
/// GEMM kernel
using GemmKernel = typename kernel::DefaultGemmSplitKParallel<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
ConvertScaledOp,
ThreadblockSwizzle,
kStages,
Operator
>::GemmKernel;
/// Reduction kernel
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>;
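  // The reduction kernel accumulates the grid_shape.k() partial products held in the
  // workspace and applies the epilogue output operator to produce the final D tensor;
  // initialize() below wires up its parameters.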
//
//
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
typename ConvertScaledOp::Params convert;
typename ReductionOp::Params reduction;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1,
typename ConvertScaledOp::Params convert_ =
typename ConvertScaledOp::Params(),
typename ReductionOp::Params reduction_ =
typename ReductionOp::Params()
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
epilogue(epilogue_),
split_k_slices(split_k_slices),
convert(convert_),
reduction(reduction_) { }
};
private:
/// Kernel parameters object
typename GemmKernel::Params gemm_params_;
/// Reduction kernel parameters object
typename ReductionKernel::Params reduction_params_;
public:
/// Constructs the GEMM.
GemmSplitKParallel() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
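    // The workspace holds one M-by-N matrix of ElementAccumulator per K partition,
    // i.e. grid_shape.k() partial-product slices.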
return sizeof(ElementAccumulator_) * size_t(args.problem_size.m()) * size_t(args.problem_size.n()) * grid_shape.k();
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
// Define a reference to the workspace - this is an aligned region in device memory.
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
TensorRef<ElementAccumulator_, layout::RowMajor> ref_workspace(
static_cast<ElementAccumulator_ *>(workspace),
args.problem_size.n());
int64_t partition_stride = int64_t(args.problem_size.m()) * int64_t(args.problem_size.n());
// Initialize the Params structure
gemm_params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
ref_workspace,
args.convert,
partition_stride
};
reduction_params_ = typename ReductionKernel::Params(
args.problem_size.mn(),
grid_shape.k(),
partition_stride,
ref_workspace,
args.ref_D,
args.ref_C.non_const_ref(),
args.epilogue
);
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
gemm_params_.ref_A.reset(args.ref_A.data());
gemm_params_.ref_B.reset(args.ref_B.data());
gemm_params_.ref_D.reset(workspace);
reduction_params_.ref_D.reset(args.ref_D.data());
reduction_params_.ref_C.reset(args.ref_C.data());
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
//
// Launch GEMM kernel
//
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(gemm_params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
cudaError_t result;
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
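    // Kernels requesting more than 48 KB of dynamic shared memory must opt in
    // explicitly via cudaFuncSetAttribute before launch.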
if (smem_size >= (48 << 10)) {
result = cudaFuncSetAttribute(
Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(gemm_params_);
result = cudaGetLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
//
// Launch reduction kernel
//
block = ReductionKernel::block_shape();
grid = ReductionKernel::grid_shape(gemm_params_.problem_size.mn());
Kernel<ReductionKernel><<< grid, block, 0, stream >>>(reduction_params_);
result = cudaGetLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major output
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
    /// Conversion operator applied to partial accumulator results stored to the workspace
typename ConvertScaledOp_,
/// Reduction operator
typename ReductionOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages, int kAlignmentA, int kAlignmentB,
/// Operation performed by GEMM
typename Operator_>
class GemmSplitKParallel<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, ElementAccumulator_,
OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ConvertScaledOp_, ReductionOp_, ThreadblockSwizzle_,
Stages, kAlignmentA, kAlignmentB, Operator_> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ConvertScaledOp = ConvertScaledOp_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ReductionOp = ReductionOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
using UnderlyingOperator = GemmSplitKParallel<
ElementB,
typename layout::LayoutTranspose<LayoutB>::type,
ElementA,
typename layout::LayoutTranspose<LayoutA>::type,
ElementC,
layout::RowMajor,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ConvertScaledOp,
ReductionOp,
ThreadblockSwizzle,
Stages,
kAlignmentA,
kAlignmentB,
Operator
>;
using UnderlyingArguments = typename UnderlyingOperator::Arguments;
using GemmKernel = typename UnderlyingOperator::GemmKernel;
using ReductionKernel = typename UnderlyingOperator::ReductionKernel;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
typename ConvertScaledOp::Params convert;
typename ReductionOp::Params reduction;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1,
typename ConvertScaledOp::Params convert_ =
typename ConvertScaledOp::Params(),
typename ReductionOp::Params reduction_ =
typename ReductionOp::Params()
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
epilogue(epilogue_),
split_k_slices(split_k_slices),
convert(convert_),
reduction(reduction_) { }
};
private:
/// Kernel parameters object
UnderlyingOperator underlying_operator_;
public:
/// Constructs the GEMM.
GemmSplitKParallel() { }
  /// Helper to construct a transposed equivalent for the underlying GEMM operator
  /// (maps C = A * B onto C^T = B^T * A^T by swapping the A and B operands and
  /// exchanging the M and N extents)
static UnderlyingArguments to_underlying_arguments(Arguments const &args) {
return UnderlyingArguments(
{args.problem_size.n(), args.problem_size.m(), args.problem_size.k()},
{args.ref_B.data(), args.ref_B.stride(0)},
{args.ref_A.data(), args.ref_A.stride(0)},
{args.ref_C.data(), args.ref_C.stride(0)},
{args.ref_D.data(), args.ref_D.stride(0)},
args.epilogue,
args.split_k_slices,
args.convert,
args.reduction
);
}
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
    Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/gemm_splitk_parallel.h/0 | {
"file_path": "include/cutlass/gemm/device/gemm_splitk_parallel.h",
"repo_id": "include",
"token_count": 7350
} | 30 |
/***************************************************************************************************
* Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Sparse GEMM kernel with an epilogue that computes the absolute maximum value of the output
and a pre-activation-function auxiliary output. The auxiliary output is also (optionally)
stored to global memory.
*/
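/*
  Summary of operand roles as reflected in the Arguments structure below (the exact
  epilogue semantics depend on the OutputOp supplied by the Epilogue template argument):

    ref_A      - structured-sparse A operand, stored compressed by a factor of kSparse
    ref_E      - metadata selecting the non-zero elements of A
    ref_B      - dense B operand
    ref_C/D    - epilogue source and destination tensors
    ref_Aux    - auxiliary (pre-activation) output; under serial split-K only the final
                 K partition writes it
    ptr_Vector - broadcast vector consumed by the epilogue, with stride ldr
*/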
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/params_sparse_base.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled.
>
struct SparseGemmWithAbsmax {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
static int const kSparse = Mma::kSparse;
static int const kMetaSizeInBits = Mma::kMetaSizeInBits;
static int const kMaxID2 = Mma::kMaxID2;
static int const kElementsPerElementE = Mma::kElementsPerElementE;
using ElementE = typename Mma::ElementE;
using LayoutE = typename Mma::LayoutE;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using ParamsA = typename Mma::IteratorA::Params;
using TensorRefA = typename Mma::IteratorA::TensorRef;
using ParamsB = typename Mma::IteratorB::Params;
using TensorRefB = typename Mma::IteratorB::TensorRef;
using ParamsE = typename Mma::IteratorE::Params;
using TensorRefE = typename Mma::IteratorE::TensorRef;
using ParamsC = typename Epilogue::OutputTileIterator::Params;
using TensorRefC = typename Epilogue::OutputTileIterator::TensorRef;
using ParamsD = typename Epilogue::OutputTileIterator::Params;
using TensorRefD = typename Epilogue::OutputTileIterator::TensorRef;
using ParamsAux = typename Epilogue::AuxOutputTileIterator::Params;
using TensorRefAux = typename Epilogue::AuxOutputTileIterator::TensorRef;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRefA ref_A;
TensorRefB ref_B;
TensorRefC ref_C;
TensorRefD ref_D;
TensorRefE ref_E;
TensorRefAux ref_Aux;
void* ptr_Vector;
typename LayoutC::Stride::Index ldr;
typename Epilogue::OutputOp::Params epilogue;
int split_k_slices;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments(): problem_size(0, 0, 0), split_k_slices(1) {
}
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRefA ref_A_,
TensorRefB ref_B_,
TensorRefC ref_C_,
TensorRefD ref_D_,
TensorRefE ref_E_,
TensorRefAux ref_Aux_,
void* ptr_Vector_,
typename LayoutC::Stride::Index ldr_,
typename OutputOp::Params epilogue_ =
typename OutputOp::Params(),
int split_k_slices = 1
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
ref_E(ref_E_),
ref_Aux(ref_Aux_),
ptr_Vector(ptr_Vector_),
ldr(ldr_),
epilogue(epilogue_),
split_k_slices(split_k_slices) {
}
};
/// Parameters structure
struct Params : public SparseParamsBase<
ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB,
ParamsE, TensorRefE> {
using Base = SparseParamsBase<
ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB,
ParamsE, TensorRefE>;
//
// Data members
//
ParamsC params_C;
TensorRefC ref_C;
ParamsD params_D;
TensorRefD ref_D;
ParamsAux params_Aux;
TensorRefAux ref_Aux;
void* ptr_Vector;
typename LayoutC::Stride::Index ldr;
typename OutputOp::Params output_op;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
TensorRefA ref_A,
TensorRefB ref_B,
TensorRefC ref_C,
TensorRefD ref_D,
TensorRefE ref_E,
TensorRefAux ref_Aux,
void* ptr_Vector,
typename LayoutC::Stride::Index ldr,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr
):
Base(problem_size, grid_tiled_shape, ref_A, ref_B, ref_E, Mma::Shape::kK),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
ref_Aux(ref_Aux),
params_Aux(ref_Aux.layout()),
ptr_Vector(ptr_Vector),
ldr(ldr) {
semaphore = workspace;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
SparseGemmWithAbsmax() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename Mma::IteratorE::TensorRef ref_E) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static int const kAlignmentE = Mma::IteratorE::AccessType::kElements;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_E, kAlignmentE)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size.m() % kAlignmentA) || ((problem_size.k() / kSparse) % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC) ||
(problem_size.m() % kAlignmentE) || ((problem_size.k() / kSparse) % kAlignmentE)) {
return Status::kErrorMisalignedOperand;
}
    // The K dimension has to be a multiple of the threadblock K because
    // out-of-bound metadata would be zero-filled by cp.async.zfill, and 0 is not
    // valid metadata.
if (problem_size.k() % Mma::Shape::kK) {
return Status::kErrorMisalignedOperand;
}
    // The M dimension has to be a multiple of 32 (sparse float) or 16 (sparse int)
    // because of the row reordering of operand E
static int const kAlignmentM = (sizeof(ElementE) == 2) ? 32 : 16;
if (problem_size.m() % kAlignmentM) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
cutlass::MatrixCoord tb_offset_E{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
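    // problem_size_k is the exclusive upper bound of this threadblock's K range under
    // split-K; the iteration count below starts from this block's K offset
    // (tb_offset_B.row()).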
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A, B, and E operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k / kSparse},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorE iterator_E(
params.params_E, params.ref_E.data(),
{params.problem_size.m(),
problem_size_k / kSparse / kElementsPerElementE},
thread_idx, tb_offset_E);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
if (!kSplitKSerial || gemm_k_iterations > 0) {
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators);
}
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // Assume an identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
typename Epilogue::ElementVector *ptr_Vector = static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr;
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to auxiliary destination tensor.
typename Epilogue::AuxOutputTileIterator iterator_Aux(
params.params_Aux,
// Only the final block writes the auxiliary tensor
((kSplitKSerial && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: params.ref_Aux.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
// Only the final block uses Vector
((kSplitKSerial && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Vector,
iterator_D,
accumulators,
iterator_C,
iterator_Aux,
params.problem_size.mn(),
threadblock_offset);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
__threadfence();
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/sparse_gemm_with_absmax.h/0 | {
"file_path": "include/cutlass/gemm/kernel/sparse_gemm_with_absmax.h",
"repo_id": "include",
"token_count": 6338
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp instructions.
*/
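/*
  Illustrative instantiation of one of the partial specializations below (tile shapes
  and element types are example values, not requirements):

    using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
        cutlass::gemm::GemmShape<128, 128, 32>,         // threadblock tile
        cutlass::gemm::GemmShape<64, 64, 32>,           // warp tile
        cutlass::gemm::GemmShape<8, 8, 4>,              // Volta mma instruction shape
        cutlass::half_t, cutlass::layout::ColumnMajor,  // A
        cutlass::half_t, cutlass::layout::RowMajor,     // B
        float, cutlass::layout::RowMajor,               // accumulator
        cutlass::arch::OpClassTensorOp,
        2,                                              // pipeline stages
        cutlass::arch::OpMultiplyAdd>;

    // MmaCore::MmaPolicy, MmaCore::SmemIteratorA / SmemIteratorB, and the thread maps
    // are then consumed by a threadblock-scoped Mma such as MmaPipelined.
*/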
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/tensor_op_multiplicand_sm70.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op_sm70.h"
#include "cutlass/gemm/warp/mma_tensor_op_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA =
layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value>;
// Shared memory layout
using SmemLayoutB =
layout::RowMajorVoltaTensorOpMultiplicandBCongruous<
sizeof_bits<ElementB>::value>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
1,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorVoltaTensorOpMultiplicandBCongruous<
sizeof_bits<ElementB>::value>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
0,
IteratorThreadMapA
>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
1,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/default_mma_core_sm70.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_mma_core_sm70.h",
"repo_id": "include",
"token_count": 6940
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
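/*
  Compared to the dense MmaBase, the classes in this file add a third shared-memory
  operand: the sparse metadata tensor E that selects the non-zero elements of the
  compressed A operand. SparseMmaPolicy carries its shared-memory padding, and
  SparseMmaBase::SharedStorage allocates its staging buffer alongside A and B.
*/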
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Policy object describing MmaTensorOp
template <
/// Warp-level GEMM operator (concept: gemm::warp::Mma)
typename Operator_,
/// Padding used for A operand in shared memory (concept: MatrixShape)
typename SmemPaddingA_,
/// Padding used for B operand in shared memory (concept: MatrixShape)
typename SmemPaddingB_,
/// Padding used for E operand in shared memory (concept: MatrixShape)
typename SmemPaddingE_,
/// Number of partitions of K dimension of GEMM
int PartitionsK = 1>
struct SparseMmaPolicy {
/// Warp-level GEMM operator (concept: gemm::warp::MmaTensorOp or gemm::warp::MmaSimt)
using Operator = Operator_;
/// Padding used for A operand in shared memory
using SmemPaddingA = SmemPaddingA_;
/// Padding used for B operand in shared memory
using SmemPaddingB = SmemPaddingB_;
  /// Padding used for E operand in shared memory
using SmemPaddingE = SmemPaddingE_;
/// Number of partitions of K dimension
static int const kPartitionsK = PartitionsK;
};
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class SparseMmaBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Policy describing tuning details
using Policy = Policy_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
  /// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
static_assert(kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert((kWarpGemmIterations % 2) == 0,
"Inner loop iteration must be an even number.");
/// Number of stages
static int const kStages = Stages;
static int const kSparse = Operator::kSparse;
static int const kElementsPerElementE = Operator::kElementsPerElementE;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
/// Tensor reference to the B operand
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
/// Tensor reference to the E operand
using TensorRefE = TensorRef<typename Operator::ElementE, typename Operator::LayoutE>;
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK / kSparse * kStages +
Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB =
MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
/// Shape of the E matrix operand in shared memory
using ShapeE =
MatrixShape<Shape::kM * 2 + Policy::SmemPaddingE::kRow,
Shape::kK / kSparse / kElementsPerElementE / 2 * kStages +
Policy::SmemPaddingE::kColumn>;
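    // Relative to a straightforward (kM) x (kK / kSparse / kElementsPerElementE)
    // footprint, the row extent is doubled and the per-stage column extent halved to
    // match the reordered metadata layout; the element count per stage is unchanged.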
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
/// Buffer for E operand
AlignedBuffer<typename Operator::ElementE, ShapeE::kCount> operand_E;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator::LayoutA LayoutA() {
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a layout object for the E matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutE LayoutE() {
return Operator::LayoutE::packed({ShapeE::kRow, ShapeE::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() {
return TensorRefB{operand_B.data(), LayoutB()};
}
/// Returns a TensorRef to the E operand
CUTLASS_HOST_DEVICE
TensorRefE operand_E_ref() {
return TensorRefE{operand_E.data(), LayoutE()};
}
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
/// Iterator to load a warp-scoped tile of E operand from shared memory
typename Operator::IteratorE warp_tile_iterator_E_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
SparseMmaBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx),
warp_tile_iterator_E_(shared_storage.operand_E_ref(), lane_idx) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_sparse_base.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_sparse_base.h",
"repo_id": "include",
"token_count": 3072
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transform on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
  /// Do source operands need more than one element
bool GeneralizedOperatorElements = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaGaussianComplexTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaGaussianComplexTensorOp<
Shape_,
complex<RealElementA>,
LayoutA_,
complex<RealElementB>,
LayoutB_,
complex<RealElementC>,
LayoutC_,
Policy_,
TransformA,
TransformB> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddGaussianComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpGaussianComplexAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
  /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
  /// storage arrangement is to be considered 'gaussian complex' in the sense that the accumulation is
  /// done in three parts, namely part1, part2, and part3. The parts 1, 2, and 3 are stored consecutively
  /// in IteratorC::Fragment. This matches the structure of Tensor Cores, which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 3 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected gaussian complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaGaussianComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
    static_assert(MmaOperandA::kElements == 1,
      "This implementation only supports math instructions in which exactly one element is needed for the A operand. "
      "We can generalize later.");
    static_assert(MmaOperandB::kElements == 1,
      "This implementation only supports math instructions in which exactly one element is needed for the B operand. "
      "We can generalize later.");
D = C;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.part1(), (a.real() + a.imag()), b.real(), accum.part1());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Asum;
MmaOperandB operand_Br;
operand_Asum[0] = A[m].real() + ((kTransformA == ComplexTransform::kConjugate) ? -A[m].imag() : +A[m].imag());
operand_Br[0] = B[n].real();
// accumulator part1
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_Asum, operand_Br, *accum);
}
// mma(accum.part2(), -a.real(), (b.real() - b.imag()), accum.part2());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ar;
MmaOperandB operand_Bdiff;
operand_Ar[0] = -A[m].real();
operand_Bdiff[0] = B[n].real() - ((kTransformB == ComplexTransform::kConjugate) ? -B[n].imag() : +B[n].imag());
// accumulator part2
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_Ar, operand_Bdiff, *accum);
}
// mma(accum.part3(), a.imag(), (b.real() + b.imag()), accum.part3())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ai;
MmaOperandB operand_Bsum;
operand_Ai[0] = (kTransformA == ComplexTransform::kConjugate) ? -A[m].imag() : +A[m].imag();
operand_Bsum[0] = B[n].real() + ((kTransformB == ComplexTransform::kConjugate) ? -B[n].imag() : +B[n].imag());
// accumulator part3
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + 2 * MmaIterations::kCount;
mma(*accum, operand_Ai, operand_Bsum, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaGaussianComplexTensorOp<
Shape_,
complex<RealElementA>,
LayoutA_,
complex<RealElementB>,
LayoutB_,
complex<RealElementC>,
LayoutC_,
Policy_,
TransformA,
TransformB,
true> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddGaussianComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpGaussianComplexAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
  /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
  /// storage arrangement is to be considered 'gaussian complex' in the sense that the accumulation is
  /// done in three parts, namely part1, part2, and part3. The parts 1, 2, and 3 are stored consecutively
  /// in IteratorC::Fragment. This matches the structure of Tensor Cores, which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 3 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected gaussian complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaGaussianComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.part1(), (a.real() + a.imag()), b.real(), accum.part1());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Asum;
MmaOperandB operand_Br;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_Asum[mk] = A[m*MmaOperandA::kElements + mk].real() + ((kTransformA == ComplexTransform::kConjugate) ?
-A[m*MmaOperandA::kElements + mk].imag() : +A[m*MmaOperandA::kElements + mk].imag());
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_Br[nk] = B[n*MmaOperandB::kElements + nk].real();
// accumulator part1
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_Asum, operand_Br, *accum);
}
// mma(accum.part2(), -a.real(), (b.real() - b.imag()), accum.part2());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ar;
MmaOperandB operand_Bdiff;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_Ar[mk] = -A[m*MmaOperandA::kElements + mk].real();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_Bdiff[nk] = B[n*MmaOperandB::kElements + nk].real() - ((kTransformB == ComplexTransform::kConjugate) ?
-B[n*MmaOperandB::kElements + nk].imag() : +B[n*MmaOperandB::kElements + nk].imag());
// accumulator part2
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_Ar, operand_Bdiff, *accum);
}
// mma(accum.part3(), a.imag(), (b.real() + b.imag()), accum.part3())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ai;
MmaOperandB operand_Bsum;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_Ai[mk] = (kTransformA == ComplexTransform::kConjugate) ?
-A[m*MmaOperandA::kElements + mk].imag() : +A[m*MmaOperandA::kElements + mk].imag();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_Bsum[nk] = B[n*MmaOperandB::kElements + nk].real() + ((kTransformB == ComplexTransform::kConjugate) ?
-B[n*MmaOperandB::kElements + nk].imag() : +B[n*MmaOperandB::kElements + nk].imag());
// accumulator part3
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + 2 * MmaIterations::kCount;
mma(*accum, operand_Ai, operand_Bsum, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h",
"repo_id": "include",
"token_count": 7585
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 64b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 16) && !(Shape::kStrided % 4), "Divisibility.");
static_assert(sizeof_bits<Element_>::value == 64, "This is specialized for 64b accesses.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Long Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 2;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<8, 4>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess / Delta::kContiguous,
InstructionShape::kStrided / Delta::kStrided
>;
};
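  // Illustrative sizing (hypothetical values, not mandated by this header):
  // with Shape::kContiguous = 64, InstructionShape::kStrided = 4, and
  // kElementsPerAccess = 2, Policy::Iterations works out to
  //   kContiguous = 64 / 2 / 8 = 4,  kStrided = 4 / 4 = 1,
  // i.e. 4 x 1 128-bit accesses per thread per load(), matching the Fragment
  // size of Shape::kContiguous * InstructionShape::kStrided / kThreads
  // = 64 * 4 / 32 = 8 elements.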
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
int access_strided = lane_id / Policy::Delta::kContiguous;
int access_contiguous = (lane_id % Policy::Delta::kContiguous) ^ access_strided;
pointer_= reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
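  // The constructor above spreads the 32 lanes over an 8 x 4 arrangement:
  // access_strided = lane_id / 8 selects the position along the strided
  // dimension, and the contiguous access index is XOR-ed with it. The XOR
  // follows the swizzle used by the TensorOpMultiplicandCongruous64b
  // shared-memory layout, with the intent of spreading a warp's 128-bit
  // accesses across shared-memory banks.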
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset =
(tile_offset.strided() * InstructionShape::kStrided) * stride_ * kElementsPerAccess +
tile_offset.contiguous() * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
add_tile_offset({0, 1});
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
add_tile_offset({0, -1});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
int access_idx = c + s * Policy::Iterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c +
Policy::Delta::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCongruous64b,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCongruous64b,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 64b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 16), "Divisibility.");
static_assert(sizeof_bits<Element_>::value == 64, "This is specialized for 64b accesses.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Long Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 2;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<4, 16>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
InstructionShape::kContiguous / Delta::kContiguous,
Shape::kStrided / Delta::kStrided
>;
};
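  // Illustrative sizing (hypothetical values, not mandated by this header):
  // with InstructionShape::kContiguous = 4, Shape::kStrided = 64, and
  // Delta = <4, 16>, Policy::Iterations works out to
  //   kContiguous = 4 / 4 = 1,  kStrided = 64 / 16 = 4,
  // and the Fragment below holds Shape::kStrided * InstructionShape::kContiguous
  // / kThreads = 64 * 4 / 32 = 8 elements.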
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
/// Internal counter for tracking K-group
Index k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
int access_strided = lane_id / 8;
int access_contiguous = (lane_id % 8);
byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof(AccessType);
pointer_= reinterpret_cast<AccessType const *>(ref.data());
}
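  // Lane mapping used above: access_strided = lane_id / 8 selects one of four
  // positions along the strided dimension, and access_contiguous = lane_id % 8
  // selects one of eight 128-bit accesses along the contiguous dimension. The
  // result is folded into byte_offset_ rather than pointer_, so the XOR-based
  // swizzle adjustments applied as the iterator advances compose with it
  // directly.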
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
pointer_ += offset / kElementsPerAccess;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset = (tile_offset.contiguous() * InstructionShape::kContiguous) *
stride_ * kElementsPerAccess +
tile_offset.strided() * Shape::kStrided;
add_pointer_offset(offset);
int old_k_group_idx = k_group_idx_;
k_group_idx_ += tile_offset.contiguous();
if ((k_group_idx_ & 2) ^ (old_k_group_idx & 2)) {
byte_offset_ ^= 0x40;
}
return *this;
}
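  // The byte_offset_ ^= 0x40 above (and the matching toggle in operator++)
  // flips the access pointer between two 64-byte halves of the
  // TensorOpMultiplicand64bCrosswise swizzle pattern as the iterator advances
  // along K; k_group_idx_ tracks the position along K so the toggle is applied
  // with the correct parity for both tile-granularity and
  // instruction-granularity advances.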
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
    // TODO: fix this if it becomes an issue during warp iterator reset
add_tile_offset(tile_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
pointer_ += stride_ * InstructionShape::kContiguous;
if (k_group_idx_ & 0x1) {
// xor ptr
byte_offset_ ^= 0x40;
}
++k_group_idx_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
int access_idx = c + s * Policy::Iterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c * stride_ +
Policy::Delta::kStrided * s / kElementsPerAccess;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
Element *exchange_ptr = reinterpret_cast<Element *>(&frag);
if (k_group_idx_ & 1) {
// exchange on 64b granularity
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Fragment::kElements; i += 2) {
Element tmp = exchange_ptr[i];
exchange_ptr[i] = exchange_ptr[i + 1];
exchange_ptr[i + 1] = tmp;
}
}
}
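  // For odd k-groups the crosswise swizzle stores the two 64-bit elements of
  // each 128-bit access in swapped order, so the loop above restores the
  // expected ordering by exchanging adjacent fragment elements in registers
  // after the loads complete.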
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset = tile_offset.contiguous() *
InstructionShape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
k_group_idx_ = k_group;
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicand64bCrosswise,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicand64bCrosswise,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for canonical matrix layouts
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand_,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
  /// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads = 32,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class MmaTensorOpMultiplicandTileIteratorCanonical {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
/// Basic check
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess =
(sizeof_bits<Element>::value >= 32 ? 1 : 32 / sizeof_bits<Element>::value);
private:
static int const kWarpShapeOuter =
(kOperand == Operand::kA ? Shape::kRow : Shape::kColumn);
static int const kWarpShapeInner =
(kOperand == Operand::kA ? Shape::kColumn : Shape::kRow);
/// Rounded up instruction counts
using InstructionCount = MatrixShape<
Shape::kRow / InstructionShape::kRow,
Shape::kColumn / InstructionShape::kColumn
>;
/// Rounded up tile dimensions
using WarpShapeDivisible = MatrixShape<
InstructionCount::kRow * InstructionShape::kRow,
InstructionCount::kColumn * InstructionShape::kColumn
>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
WarpShapeDivisible::kRow * WarpShapeDivisible::kColumn / kThreads
>;
/// Memory access type
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Underlying tensor reference
TensorRef ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to conditionally enable extents checking
bool divisible_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical(): divisible_(true) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical(
TensorRef const &ref,
int lane_id
): ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) {
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess);
}
else {
origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4);
}
ref_.add_coord_offset(origin_);
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical(
TensorRef const &ref,
TensorCoord extent,
int lane_id
): ref_(ref), extent_(extent), divisible_(false) {
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess);
}
else {
origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4);
}
ref_.add_coord_offset(origin_);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical &add_tile_offset(TensorCoord const &tile_offset) {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator++() {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
}
else {
add_tile_offset({1, 0});
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator--() {
if (kOperand == Operand::kA) {
add_tile_offset({0, -1});
}
else {
add_tile_offset({-1, 0});
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
int const kWarpShapeDivisibleInner =
(kOperand == Operand::kA ? WarpShapeDivisible::kColumn : WarpShapeDivisible::kRow);
// Take advantage of Tensor Op's 8 x 4T access pattern
int const kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
AccessType *access_ptr = reinterpret_cast<AccessType *>(&frag);
if (kOperand == Operand::kA) {
int const kTilesPerInstruction = InstructionShape::kRow / 8;
CUTLASS_PRAGMA_UNROLL
for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction; ++access_m_idx) {
int access_idx =
access_m_idx + kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx);
MatrixCoord offset(
access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
inner_idx * 4 * kElementsPerAccess);
MatrixCoord access_coord = origin_ + offset;
if (divisible_ ||
(access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
}
else {
AccessType zero;
zero.clear();
access_ptr[access_idx] = zero;
}
}
}
}
}
else {
CUTLASS_PRAGMA_UNROLL
for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
int access_idx = inner_idx + kAccessesInner * inst_n_idx;
MatrixCoord offset(
inner_idx * 4 * kElementsPerAccess,
inst_n_idx * 8);
MatrixCoord access_coord = origin_ + offset;
if (divisible_ ||
(access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
}
else {
AccessType zero;
zero.clear();
access_ptr[access_idx] = zero;
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation
}
};
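////////////////////////////////////////////////////////////////////////////////
//
// Worked note (illustrative addition, not part of the original header): for a 32-bit element
// type, kElementsPerAccess evaluates to 1, so lane 13 of the warp starts at logical coordinate
// (13 / 4, (13 % 4) * 1) = (3, 1) when loading operand A, and at the transposed coordinate
// (1, 3) when loading operand B. This is the 8 x 4-thread Tensor Op access pattern that
// load_with_pointer_offset() unrolls over.
//
////////////////////////////////////////////////////////////////////////////////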
/// Wrapper for ColumnMajor
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIteratorCanonical<
Shape, kOperand, Element,
layout::ColumnMajor,
InstructionShape,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
TensorCoord const & extent,
int lane_id
): iterator_({ref.data(), ref.stride()}, extent, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
/// Wrapper for RowMajor
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIteratorCanonical<
Shape, kOperand, Element,
layout::RowMajor,
InstructionShape,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
TensorCoord const &extent,
int lane_id
): iterator_({ref.data(), ref.stride()}, extent, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h",
"repo_id": "include",
"token_count": 25010
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes.
Layout functions map logical coordinates to linear memory. They often require additional
data to describe strides between elements.
Layout functions must implement all members in the public interface of IdentityTensorLayout<>
defined in cutlass/tensor_ref.h.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/pitch_linear_coord.h"
namespace cutlass {
namespace layout {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Defines data layouts of various matrix formats usable by TensorRef and other classes.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for row-major matrices.
class RowMajor {
public:
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
RowMajor(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajor(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajor packed(MatrixCoord const &extent) {
return RowMajor(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return LongIndex(coord.row()) * LongIndex(stride_[0]) + coord.column();
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
return MatrixCoord(Index(offset / stride_[0]), Index(offset % stride_[0]));
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return LongIndex(extent.row()) * LongIndex(stride_[0]);
}
};
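/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative addition, not part of the original header). The function name
// example_row_major_offset is hypothetical; it only demonstrates how a packed RowMajor layout
// maps a logical (row, column) coordinate to a linear offset.
//
CUTLASS_HOST_DEVICE
inline int64_t example_row_major_offset() {
  // A tightly packed 4x8 row-major matrix has leading dimension 8 (its column extent).
  RowMajor layout = RowMajor::packed(MatrixCoord(4, 8));
  // Element (2, 3) lives at linear offset 2 * 8 + 3 = 19; layout.inverse(19) recovers (2, 3).
  return layout(MatrixCoord(2, 3));
}
/////////////////////////////////////////////////////////////////////////////////////////////////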
/// Mapping function for column-major matrices.
class ColumnMajor {
public:
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajor(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajor(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajor packed(MatrixCoord const &extent) {
return ColumnMajor(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return LongIndex(coord.column()) * LongIndex(stride_[0]) + coord.row();
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
return MatrixCoord(Index(offset % stride_[0]), Index(offset / stride_[0]));
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return LongIndex(extent.column()) * LongIndex(stride_[0]);
}
};
/// Mapping function for interleaved matrices. Matrix is structured
/// as row-major arrangement of fixed-size columns.
template <int Interleave>
struct RowMajorInterleaved {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
/// Size of interleaved columns
static int const kInterleave = Interleave;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorInterleaved(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorInterleaved(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorInterleaved packed(MatrixCoord const &extent) {
return RowMajorInterleaved(extent.column() * kInterleave);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
Index row_major = coord.row() / kInterleave;
Index row_minor = coord.row() % kInterleave;
return LongIndex(row_major) * LongIndex(stride_[0]) + LongIndex(coord.column()) * kInterleave + row_minor;
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
Index row_major = Index(offset / stride_[0]);
Index residual = Index(offset % stride_[0]);
Index column = residual / kInterleave;
Index row_minor = residual % kInterleave;
return MatrixCoord(row_major * kInterleave + row_minor, column);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.row() + kInterleave - 1) / kInterleave * stride_[0];
}
};
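/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Worked sketch (illustrative addition, not part of the original header): how a packed
// RowMajorInterleaved<4> layout places an element. ColumnMajorInterleaved below is the symmetric
// case with the roles of rows and columns exchanged. The function name
// example_row_major_interleaved is hypothetical and exists only for exposition.
//
CUTLASS_HOST_DEVICE
inline int64_t example_row_major_interleaved() {
  // Packed 8x8 matrix with 4-row interleaving: stride_[0] = 8 columns * 4 = 32.
  RowMajorInterleaved<4> layout = RowMajorInterleaved<4>::packed(MatrixCoord(8, 8));
  // Coordinate (5, 2): row group 5 / 4 = 1, row remainder 1, so the
  // offset is 1 * 32 + 2 * 4 + 1 = 41.
  return layout(MatrixCoord(5, 2));
}
/////////////////////////////////////////////////////////////////////////////////////////////////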
/// Mapping function for interleaved matrices. Matrix is structured
/// as column-major arrangement of fixed-size rows.
template <int Interleave>
struct ColumnMajorInterleaved {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
/// Size of interleaved columns
static int const kInterleave = Interleave;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorInterleaved(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorInterleaved(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorInterleaved packed(MatrixCoord const &extent) {
return ColumnMajorInterleaved(extent.row() * kInterleave);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
Index column_major = coord.column() / kInterleave;
Index column_minor = coord.column() % kInterleave;
return LongIndex(column_major) * LongIndex(stride_[0]) + LongIndex(coord.row()) * kInterleave + column_minor;
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
Index column_major = Index(offset / stride_[0]);
Index residual = Index(offset % stride_[0]);
Index row = residual / kInterleave;
Index column_minor = residual % kInterleave;
return MatrixCoord(row, column_major * kInterleave + column_minor);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.column() + kInterleave - 1) / kInterleave * stride_[0];
}
};
/// Enumerated type for canonical pitch-linear matrix layouts
enum class Matrix {
kColumnMajor, ///< leading dimension refers to stride between columns; stride along rows is 1
kRowMajor ///< leading dimension refers to stride between rows; stride along columns is 1
};
/// Mapping function for scenario in which layout is row-major or column-major but this information
/// is only available at runtime.
struct ContiguousMatrix {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
/// Enumerated type indicating canonical matrix layout
Matrix layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ContiguousMatrix(
Index ldm = 0,
Matrix layout = Matrix::kColumnMajor
):
stride_(ldm), layout_(layout) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ContiguousMatrix packed(
MatrixCoord const &extent,
Matrix layout = Matrix::kColumnMajor) {
Index ldm = 0;
if (layout == Matrix::kColumnMajor) {
ldm = extent.row();
}
else if (layout == Matrix::kRowMajor) {
ldm = extent.column();
}
return ContiguousMatrix(ldm, layout);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
if (layout_ == Matrix::kColumnMajor) {
return coord.row() + coord.column() * stride_[0];
}
else if (layout_ == Matrix::kRowMajor) {
return coord.row() * stride_[0] + coord.column();
}
else {
// degenerate case
return 0;
}
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
CUTLASS_UNUSED(offset);
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
if (layout_ == Matrix::kColumnMajor) {
return stride_[0] * extent.column();
}
else if (layout_ == Matrix::kRowMajor) {
return stride_[0] * extent.row();
}
else {
// degenerate case
return 0;
}
}
};
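/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative addition, not part of the original header): ContiguousMatrix lets
// the canonical layout be selected at runtime. The function name example_contiguous_matrix is
// hypothetical and exists only for exposition.
//
CUTLASS_HOST_DEVICE
inline int64_t example_contiguous_matrix() {
  // The same ContiguousMatrix type can describe either canonical layout; here it is packed
  // as row-major, so the leading dimension becomes the column extent (8).
  ContiguousMatrix layout = ContiguousMatrix::packed(MatrixCoord(4, 8), Matrix::kRowMajor);
  // Element (2, 3) maps to 2 * 8 + 3 = 19.
  return layout(MatrixCoord(2, 3));
}
/////////////////////////////////////////////////////////////////////////////////////////////////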
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for scenario in which both rows and columns are separated by a stride.
template <int Rank>
struct AffineRankN {
/// Logical rank of tensor
static int const kRank = Rank;
/// Rank of stride vector
static int const kStrideRank = kRank;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Coord<kRank, Index>;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
AffineRankN(
Stride const &stride = Stride()
):
stride_(stride) { }
/// Ctor
CUTLASS_HOST_DEVICE
AffineRankN(
Coord<kRank/2, LongIndex> const &stride_m,
Coord<kRank/2, LongIndex> const &stride_n
) {
// Concatenate the strides
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kRank/2; ++m) {
stride_[m] = stride_m[m];
}
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kRank/2; ++n) {
stride_[n + kRank/2] = stride_n[n];
}
}
/// Ctor for N = 2
CUTLASS_HOST_DEVICE
AffineRankN(
LongIndex const &stride_m,
LongIndex const &stride_n
) {
stride_[0] = stride_m;
stride_[1] = stride_n;
}
/// Ctor for N = 2
CUTLASS_HOST_DEVICE
AffineRankN(
LongIndex const &stride
) {
stride_[0] = stride;
stride_[1] = 1;
}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static AffineRankN packed(TensorCoord const &extent) {
AffineRankN layout;
layout.stride_[kRank - 1] = 1;
CUTLASS_PRAGMA_UNROLL
for (int i = kRank - 1; i > 0; --i) {
layout.stride_[i - 1] = layout.stride_[i] * extent[i];
}
return layout;
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return dot(coord, stride_);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
return TensorCoord();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
int idx = stride_.max_dim_index();
return extent[idx] * stride_[idx];
}
};
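/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Worked sketch (illustrative addition, not part of the original header): AffineRankN<2>::packed
// assigns stride 1 to the last rank and multiplies outward by the extents. The function name
// example_affine_rank_2 is hypothetical and exists only for exposition.
//
CUTLASS_HOST_DEVICE
inline int64_t example_affine_rank_2() {
  // For a 4x8 extent the packed strides are (8, 1), i.e. a packed row-major matrix.
  AffineRankN<2> layout = AffineRankN<2>::packed(make_Coord(4, 8));
  // dot((2, 3), (8, 1)) = 2 * 8 + 3 = 19.
  return layout(make_Coord(2, 3));
}
/////////////////////////////////////////////////////////////////////////////////////////////////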
/// Mapping function for scenario in which both rows and columns are separated by a stride.
/// Row stride is smaller than column stride in AffineRank2ColumnMajor.
struct AffineRank2ColumnMajor {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 2;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2ColumnMajor(
Stride const &stride = Stride()
):
stride_(stride) { }
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2ColumnMajor(
LongIndex row_stride, ///< stride between elements in consecutive rows
LongIndex column_stride ///< stride between elements in consecutive columns
)
{ stride_[0] = row_stride; stride_[1] = column_stride;}
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2ColumnMajor(
LongIndex stride
)
{ stride_[0] = 1; stride_[1] = stride;}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static AffineRank2ColumnMajor packed(MatrixCoord const &extent) {
return AffineRank2ColumnMajor(1, extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return dot(coord, stride_);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
CUTLASS_UNUSED(offset);
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return extent.column() * stride_[1];
}
};
/// Mapping function for scenario in which both rows and columns are separated by a stride.
/// Column stride is smaller than row stride in AffineRank2RowMajor.
struct AffineRank2RowMajor {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 2;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2RowMajor(
Stride const &stride = Stride()
):
stride_(stride) { }
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2RowMajor(
LongIndex row_stride, ///< stride between elements in consecutive rows
LongIndex column_stride ///< stride between elements in consecutive columns
) { stride_[0] = row_stride; stride_[1] = column_stride;}
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2RowMajor(
LongIndex stride
) { stride_[0] = stride; stride_[1] = 1;}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static AffineRank2RowMajor packed(MatrixCoord const &extent) {
    // Packed row-major: the row stride equals the column extent; the column stride is 1.
    return AffineRank2RowMajor(extent.column(), 1);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return dot(coord, stride_);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
CUTLASS_UNUSED(offset);
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return extent.row() * stride_[0];
}
};
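/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Worked sketch (illustrative addition, not part of the original header): packed() for the two
// rank-2 affine layouts, assuming the corrected row-major packing above. The function name
// example_affine_rank2_packed is hypothetical and exists only for exposition.
//
CUTLASS_HOST_DEVICE
inline int64_t example_affine_rank2_packed() {
  // Packed 4x8 extents: column-major strides are (1, 4), row-major strides are (8, 1).
  AffineRank2ColumnMajor col_major = AffineRank2ColumnMajor::packed(MatrixCoord(4, 8));
  AffineRank2RowMajor row_major = AffineRank2RowMajor::packed(MatrixCoord(4, 8));
  // Element (2, 3): dot((2, 3), (1, 4)) = 14 and dot((2, 3), (8, 1)) = 19.
  return col_major(MatrixCoord(2, 3)) + row_major(MatrixCoord(2, 3));   // 14 + 19 = 33
}
/////////////////////////////////////////////////////////////////////////////////////////////////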
/////////////////////////////////////////////////////////////////////////////////////////////////
// Utility functions to convert stride_factor to the strides used by the Affine2 layout.
//
// stride_factor is the logical distance between two adjacent coordinates.
//
// All coordinates used here are matrix coordinates. stride[0] and extent[0] are for the
// rows. stride[1] and extent[1] are for the columns. A worked example follows the template
// specializations below.
template <typename Affine2Layout>
struct Affine2Layout_Factory {
CUTLASS_HOST_DEVICE
static Affine2Layout layout_factory(cutlass::Coord<2> const &extent, typename Affine2Layout::Stride stride_factor) {
return Affine2Layout::packed(extent);
}
};
template <>
struct Affine2Layout_Factory<cutlass::layout::AffineRank2ColumnMajor> {
CUTLASS_HOST_DEVICE
static cutlass::layout::AffineRank2ColumnMajor layout_factory(
cutlass::Coord<2> const &extent,
typename cutlass::layout::AffineRank2ColumnMajor::Stride stride_factor) {
return cutlass::layout::AffineRank2ColumnMajor({ stride_factor[0], stride_factor[0] * stride_factor[1] * extent[0] });
}
};
template <>
struct Affine2Layout_Factory<cutlass::layout::AffineRank2RowMajor> {
CUTLASS_HOST_DEVICE
static cutlass::layout::AffineRank2RowMajor layout_factory(
cutlass::Coord<2> const &extent,
typename cutlass::layout::AffineRank2RowMajor::Stride stride_factor) {
return cutlass::layout::AffineRank2RowMajor({ stride_factor[0] * stride_factor[1] * extent[1], stride_factor[1] });
}
};
// The base layout cutlass::layout::AffineRankN<2> is similar to AffineRank2ColumnMajor
template <>
struct Affine2Layout_Factory<cutlass::layout::AffineRankN<2>> {
CUTLASS_HOST_DEVICE
static cutlass::layout::AffineRankN<2> layout_factory(
cutlass::Coord<2> const &extent,
typename cutlass::layout::AffineRankN<2>::Stride stride_factor) {
return cutlass::layout::AffineRankN<2>({ stride_factor[0], stride_factor[0] * stride_factor[1] * extent[0] });
}
};
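/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Worked sketch (illustrative addition, not part of the original header): with extent (4, 8) and
// stride_factor (1, 1), the AffineRank2ColumnMajor factory produces strides (1, 1 * 1 * 4) =
// (1, 4), i.e. a tightly packed column-major matrix; a stride_factor of (2, 1) would instead
// produce (2, 8), padding along both dimensions. The function name example_affine2_factory is
// hypothetical and exists only for exposition.
//
CUTLASS_HOST_DEVICE
inline int64_t example_affine2_factory() {
  // A stride_factor of (1, 1) requests a tightly packed layout.
  AffineRank2ColumnMajor::Stride stride_factor;
  stride_factor[0] = 1;
  stride_factor[1] = 1;
  AffineRank2ColumnMajor layout =
      Affine2Layout_Factory<AffineRank2ColumnMajor>::layout_factory(make_Coord(4, 8), stride_factor);
  // Resulting strides are (1, 4), so element (2, 3) maps to 2 * 1 + 3 * 4 = 14.
  return layout(MatrixCoord(2, 3));
}
/////////////////////////////////////////////////////////////////////////////////////////////////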
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for block-linear matrices. Matrix is structured
/// as column-major arrangement of 2D tiles (that are column-major).
template <int BlockRows, int BlockColumns>
struct ColumnMajorBlockLinear {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
/// Size of a block in rows
static int const kBlockRows = BlockRows;
/// Size of a block in columns
static int const kBlockColumns = BlockColumns;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorBlockLinear(Index ldm = 0): stride_(ldm) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorBlockLinear packed(MatrixCoord const &extent) {
return ColumnMajorBlockLinear(extent.row() * kBlockRows * kBlockColumns);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return
(coord.row() % kBlockRows) +
(coord.column() % kBlockColumns) * kBlockRows +
(coord.row() / kBlockRows) * kBlockRows * kBlockColumns +
(coord.column() / kBlockColumns) * stride_[0];
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.column() + kBlockColumns - 1) / kBlockColumns * stride_[0];
}
};
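// Worked example (illustrative): for ColumnMajorBlockLinear<4, 4> over an 8x8 extent,
// packed() sets stride_[0] = 8 * 4 * 4 = 128, and coordinate (row=5, column=6) maps to
//
//   (5 % 4) + (6 % 4) * 4 + (5 / 4) * 4 * 4 + (6 / 4) * 128 = 1 + 8 + 16 + 128 = 153
//
//   using Layout = cutlass::layout::ColumnMajorBlockLinear<4, 4>;
//   Layout layout = Layout::packed(cutlass::MatrixCoord(8, 8));
//   int64_t offset = layout(cutlass::MatrixCoord(5, 6));   // 153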
/// Mapping function for block-linear matrices. The matrix is structured as a
/// row-major arrangement of 2D tiles (which are themselves row-major).
template <int BlockRows, int BlockColumns>
struct RowMajorBlockLinear {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
/// Size of a block in rows
static int const kBlockRows = BlockRows;
/// Size of a block in columns
static int const kBlockColumns = BlockColumns;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorBlockLinear(Index ldm = 0): stride_(ldm) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorBlockLinear packed(MatrixCoord const &extent) {
return RowMajorBlockLinear(extent.column() * kBlockRows * kBlockColumns);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return
(coord.column() % kBlockColumns) +
(coord.row() % kBlockRows) * kBlockColumns +
(coord.column() / kBlockColumns) * kBlockRows * kBlockColumns +
(coord.row() / kBlockRows) * stride_[0];
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.row() + kBlockRows - 1) / kBlockRows * stride_[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct GeneralMatrix {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 2;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index>;
private:
//
// Data members
//
Matrix layout_id_;
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
GeneralMatrix(): layout_id_(Matrix::kColumnMajor), stride_(make_Coord(0, 1)) { }
/// Ctor
CUTLASS_HOST_DEVICE
GeneralMatrix(
Matrix layout_id,
Index ldm,
Index interleave): layout_id_(layout_id), stride_(make_Coord(ldm, interleave)) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static GeneralMatrix packed(
MatrixCoord const &extent,
Matrix layout_id = Matrix::kColumnMajor,
Index interleave = 1) {
Index c;
if (layout_id == Matrix::kRowMajor) {
c = extent.column();
}
else {
c = extent.row();
}
Index ldm = c * interleave;
return GeneralMatrix(layout_id, ldm, interleave);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
Index c, s;
if (layout_id_ == Matrix::kRowMajor) {
c = coord.column();
s = coord.row();
}
else {
s = coord.column();
c = coord.row();
}
Index v = s / stride_[1];
Index residual = (s % stride_[1]);
return LongIndex(c) * LongIndex(stride_[1]) + LongIndex(v) * LongIndex(stride_[0]) + residual;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
CUTLASS_HOST_DEVICE
Matrix layout_id() const {
return layout_id_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
CUTLASS_HOST_DEVICE
Matrix & layout_id() {
return layout_id_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
Index s;
if (layout_id_ == Matrix::kRowMajor) {
s = extent.row();
}
else {
s = extent.column();
}
Index v = Index((s + stride_[1] - 1) / stride_[1]);
return LongIndex(v) * LongIndex(stride_[0]);
}
};
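// Worked example (illustrative): a column-major GeneralMatrix packed over a 4x6
// extent with interleave = 2 has stride = (ldm, interleave) = (4 * 2, 2) = (8, 2).
// Coordinate (row=3, column=5) then maps to
//
//   v = 5 / 2 = 2, residual = 5 % 2 = 1
//   offset = 3 * 2 + 2 * 8 + 1 = 23
//
//   auto layout = cutlass::layout::GeneralMatrix::packed(
//       cutlass::MatrixCoord(4, 6), cutlass::layout::Matrix::kColumnMajor, 2);
//   int64_t offset = layout(cutlass::MatrixCoord(3, 5));   // 23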
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines transposes of matrix layouts
template <typename Layout>
struct LayoutTranspose;
/// Transpose of row-major is column-major
template <>
struct LayoutTranspose<layout::RowMajor> {
using type = layout::ColumnMajor;
};
/// Transpose of column-major is row-major
template <>
struct LayoutTranspose<layout::ColumnMajor> {
using type = layout::RowMajor;
};
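// Usage sketch (illustrative): the transposition is resolved at compile time, e.g.
//
//   static_assert(cutlass::platform::is_same<
//       cutlass::layout::LayoutTranspose<cutlass::layout::RowMajor>::type,
//       cutlass::layout::ColumnMajor>::value, "transpose of row-major is column-major");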
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
| include/cutlass/layout/matrix.h/0 | {
"file_path": "include/cutlass/layout/matrix.h",
"repo_id": "include",
"token_count": 11541
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes for pitch-linear memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template defining a shape used by pitch-linear operators
template <
int Contiguous,
int Strided
>
struct PitchLinearShape {
static int const kContiguous = Contiguous;
static int const kStrided = Strided;
static int const kCount = Contiguous * Strided;
};
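// Example (illustrative): PitchLinearShape<128, 8> describes a tile spanning 128
// elements along the contiguous dimension and 8 along the strided dimension:
//
//   static_assert(cutlass::PitchLinearShape<128, 8>::kCount == 1024, "128 * 8 elements");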
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Coordinate in pitch-linear space
struct PitchLinearCoord : public Coord<2, int> {
public:
/// Integer-valued index
using Index = int;
/// Base type is a Coord of rank=2
using Base = Coord<2, Index>;
/// Long integer type
using LongIndex = typename Base::LongIndex;
private:
  /// Contiguous dimension
static int const kContiguous = 0;
  /// Strided dimension
static int const kStrided = 1;
public:
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
PitchLinearCoord() { }
/// Constructs from Coord<2>
CUTLASS_HOST_DEVICE
PitchLinearCoord(Coord<2, Index> const &coord): Base(coord) { }
  /// Helper to construct from contiguous and strided coordinates
CUTLASS_HOST_DEVICE
PitchLinearCoord(Index contiguous_, Index strided_): Base(make_Coord(contiguous_, strided_)) { }
  /// Helper to construct from contiguous and strided coordinates given as LongIndex
CUTLASS_HOST_DEVICE
PitchLinearCoord(LongIndex contiguous_, LongIndex strided_)
: Base(make_Coord(Index(contiguous_), Index(strided_))) { }
/// Returns the contiguous dimension
CUTLASS_HOST_DEVICE
Index const & contiguous() const { return this->at(kContiguous); }
/// Returns the contiguous dimension
CUTLASS_HOST_DEVICE
Index & contiguous() { return this->at(kContiguous); }
  /// Returns the strided dimension
CUTLASS_HOST_DEVICE
Index const & strided() const { return this->at(kStrided); }
  /// Returns the strided dimension
CUTLASS_HOST_DEVICE
Index & strided() { return this->at(kStrided); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
PitchLinearCoord operator+(Base const& b) const {
return PitchLinearCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
PitchLinearCoord operator-(Base const& b) const {
return PitchLinearCoord(Base::operator-(b));
}
CUTLASS_HOST_DEVICE
PitchLinearCoord operator-() const {
return PitchLinearCoord(-at(0), -at(1));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
PitchLinearCoord operator*(Base const& b) const {
return PitchLinearCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
PitchLinearCoord operator/(Base const& b) const {
return PitchLinearCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
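// Usage sketch (illustrative): PitchLinearCoord is a rank-2 Coord with named accessors.
//
//   cutlass::PitchLinearCoord a(3, 5);     // contiguous = 3, strided = 5
//   cutlass::PitchLinearCoord b(1, 2);
//   cutlass::PitchLinearCoord c = a + b;   // contiguous = 4, strided = 7
//   int s = c.strided();                   // 7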
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/pitch_linear_coord.h/0 | {
"file_path": "include/cutlass/pitch_linear_coord.h",
"repo_id": "include",
"token_count": 1639
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Performs equality comparison between two elements, with relative-error
           support for floating-point types.
*/
#pragma once
#include "numeric_types.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
CUTLASS_HOST_DEVICE
bool relatively_equal(T a, T b, T epsilon, T nonzero_floor);
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
// This floating-point comparison function implements the method described in
//
// https://floating-point-gui.de/errors/comparison/
//
template <typename T>
CUTLASS_HOST_DEVICE
bool relatively_equal_float(T a, T b, T epsilon, T nonzero_floor) {
#if defined(__CUDACC_RTC__)
using cuda::std::abs;
#else
using std::abs;
#endif
T abs_A = abs(a);
T abs_B = abs(b);
T diff = abs(a - b);
T zero = T(0);
if (a == b) {
return true;
}
else if (a == zero || b == zero || diff < nonzero_floor) {
return diff < epsilon * nonzero_floor;
}
return diff < epsilon * (abs_A + abs_B);
}
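// Worked example (illustrative): relatively_equal_float(1.0001f, 1.0002f, 1e-3f, 1e-6f)
// takes the relative branch because a != b, neither operand is zero, and
// diff (~1e-4) is not below nonzero_floor (1e-6); it then evaluates
//
//   diff < epsilon * (|a| + |b|)  ->  ~1e-4 < 1e-3 * ~2.0003  ->  true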
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<bool>(bool a, bool b, bool, bool) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint1b_t>(uint1b_t a, uint1b_t b, uint1b_t, uint1b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int2b_t>(int2b_t a, int2b_t b, int2b_t, int2b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint2b_t>(uint2b_t a, uint2b_t b, uint2b_t, uint2b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int4b_t>(int4b_t a, int4b_t b, int4b_t, int4b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint4b_t>(uint4b_t a, uint4b_t b, uint4b_t, uint4b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int8_t>(int8_t a, int8_t b, int8_t, int8_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint8_t>(uint8_t a, uint8_t b, uint8_t, uint8_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int16_t>(int16_t a, int16_t b, int16_t, int16_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint16_t>(uint16_t a, uint16_t b, uint16_t, uint16_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int32_t>(int32_t a, int32_t b, int32_t, int32_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint32_t>(uint32_t a, uint32_t b, uint32_t, uint32_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int64_t>(int64_t a, int64_t b, int64_t, int64_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint64_t>(uint64_t a, uint64_t b, uint64_t, uint64_t) {
return (a == b);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<float_e4m3_t>(float_e4m3_t a, float_e4m3_t b, float_e4m3_t epsilon, float_e4m3_t nonzero_floor) {
return detail::relatively_equal_float<float>(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<float_e5m2_t>(float_e5m2_t a, float_e5m2_t b, float_e5m2_t epsilon, float_e5m2_t nonzero_floor) {
return detail::relatively_equal_float<float>(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<half_t>(half_t a, half_t b, half_t epsilon, half_t nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<bfloat16_t>(
bfloat16_t a,
bfloat16_t b,
bfloat16_t epsilon,
bfloat16_t nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<tfloat32_t>(
tfloat32_t a,
tfloat32_t b,
tfloat32_t epsilon,
tfloat32_t nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<float>(float a, float b, float epsilon, float nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<double>(double a, double b, double epsilon, double nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
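// Usage sketch (illustrative; tolerance values assumed): comparing a reference and a
// computed result with a relative epsilon and a floor for near-zero magnitudes.
//
//   bool close = cutlass::relatively_equal<float>(ref_value, test_value, 1e-5f, 1e-7f);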
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/relatively_equal.h/0 | {
"file_path": "include/cutlass/relatively_equal.h",
"repo_id": "include",
"token_count": 2329
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief ELL iterator for the Blocked-ELL matrix (ellValue matrix) used with EllMmaMultistage
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// EllPredicatedTileAccessIterator
///
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap, typename AccessType>
class EllPredicatedTileAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class EllPredicatedTileAccessIterator<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector;
/// Number of 32b words containing predicates
static int const kPredicateByteCount =
(kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte;
static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend EllPredicatedTileAccessIterator;
private:
/// stride of pitch-linear layout (units of Element)
LongIndex stride_;
/// amount (in byte) to increment pointer to move to next access along
/// strided dimension
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
Params(): stride_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : stride_(layout.stride(0)) {
inc_strided_ = (LongIndex(stride_) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ =
Shape::kStrided * LongIndex(stride_) * sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ = Shape::kContiguous * sizeof_bits<Element>::value / 8;
}
inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kStrided - 1) *
ThreadMap::Delta::kStrided * LongIndex(stride_) *
sizeof_bits<Element>::value / 8;
};
};
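  // Worked example (illustrative; all values assumed): for a half_t tensor with
  // stride 1024, ThreadMap::Delta::kStrided = 8, ThreadMap::Iterations::kStrided = 4,
  // Shape::kStrided = 32, and kAdvanceRank = 1, the precomputed byte increments are
  //
  //   inc_strided_ = 1024 * 8 * 2                   = 16384
  //   inc_advance_ = 32 * 1024 * 2                  = 65536
  //   inc_next_    = 65536 - (4 - 1) * 8 * 1024 * 2 = 16384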
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Offset to the first steady-state tile
TensorCoord residue_offset_;
/// Initial offset to define ELL block
TensorCoord ell_offset_;
/// Used for out-of-order visitation
bool is_residue_tile_;
/// Iteration along vectors implied by the thread map
int iteration_vector_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
}
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
if (is_steady_state) {
if (kAdvanceRank == 0) {
guard = (coord.strided() < extent.strided());
} else {
guard = (coord.contiguous() < extent.contiguous());
}
} else {
guard = (coord.strided() < extent.strided() &&
coord.contiguous() < extent.contiguous());
}
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
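  // Bit-packing note (illustrative): with kPredicatesPerByte = 4 and
  // kPredicatesPerWord = 16, predicate index 13 lands in word 0, byte 3, bit 1,
  // i.e. bit (3 * 8 + 1) = 25 of predicates_[0]; valid() below reads the same bit.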
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
extent_(extent),
is_residue_tile_(true) {
TensorCoord residue_extent;
if (kAdvanceRank) {
typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.strided()) % Shape::kStrided;
if (!residue_size) {
residue_size = Shape::kStrided;
}
residue_offset_ = make_Coord(0, residue_size);
residue_extent = make_Coord(
extent_.contiguous(),
min(threadblock_offset.strided() + residue_size, extent_.strided())
);
} else {
typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.contiguous()) % Shape::kContiguous;
if (!residue_size) {
residue_size = Shape::kContiguous;
}
residue_offset_ = make_Coord(residue_size, 0);
residue_extent = make_Coord(
min(extent_.contiguous(), threadblock_offset.contiguous() + residue_size),
extent_.strided()
);
}
// Per-thread offset in logical coordinates of tensor
ell_offset_ = ThreadMap::initial_offset(thread_id);
thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(thread_offset_));
compute_predicates_(residue_extent, false);
set_iteration_index(0);
}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
if (is_residue_tile_) {
thread_offset_ += residue_offset_;
Layout layout(params_.stride_);
add_pointer_offset(layout(residue_offset_));
compute_predicates_(extent_, true);
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided() - 1);
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous() - 1);
pointer_ += Shape::kStrided * tile_offset.strided();
}
} else {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
}
}
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + iteration_vector_;
}
  /// Returns the location of the current access along the K dimension
CUTLASS_HOST_DEVICE
int get_k() const {
if(kAdvanceRank){ //strided
return ell_offset_.strided() + iteration_strided_ * ThreadMap::Delta::kStrided;
}else{
return ell_offset_.contiguous() + iteration_contiguous_ * ThreadMap::Delta::kContiguous + iteration_vector_ * AccessType::kElements;
}
}
CUTLASS_HOST_DEVICE
int get_stride() const {
if(kAdvanceRank)
return params_.stride_;
else
return 1;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_strided_;
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
Mask mask;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = 0u;
}
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = ell_offset_ + iteration_coord;
bool guard;
if (kAdvanceRank == 0) {
guard = (coord.strided() < blocksize);
} else {
guard = (coord.contiguous() < blocksize);
}
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
mask[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] &= predicates_[i];
}
set_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
};
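// Usage sketch (illustrative; Shape, Element, ThreadMap, AccessType and all runtime
// values are assumed): the access iterator is constructed from a host-precomputed
// Params object and advanced one access at a time, guarding each access with valid().
//
//   using Iterator = cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
//       Shape, Element, cutlass::layout::PitchLinear, 1, ThreadMap, AccessType>;
//   typename Iterator::Params params(layout);              // host-constructible
//   Iterator iter(params, pointer, extent, thread_id, tb_offset);
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int i = 0; i < ThreadMap::Iterations::kCount * Iterator::kAccessesPerVector; ++i) {
//     if (iter.valid()) {
//       AccessType value = *(iter.get());                  // guarded global load
//     }
//     ++iter;
//   }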
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class EllPredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class EllPredicatedTileAccessIterator<Shape_, Element_, layout::RowMajor,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for column-major interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, int InterleavedK>
class EllPredicatedTileAccessIterator<Shape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for row-major interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, int InterleavedK>
class EllPredicatedTileAccessIterator<Shape_, Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h/0 | {
"file_path": "include/cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h",
"repo_id": "include",
"token_count": 15640
} | 39 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing computing the addresses of storing of tiles
from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
static int const kCrosswise = Crosswise;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
///< Number of pointers
static int const kPointerCount =
(ThreadMap::Iterations::kStrided > 1 ? 2 : 1);
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_[Detail::kPointerCount];
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base =
ThreadMap::initial_offset(thread_id);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
thread_offset_base +
layout::PitchLinearCoord{
0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i};
// initialize pointer
pointer_[i] = reinterpret_cast<AccessType *>(
ref.data() + ref.offset(thread_offset_in_threadblock_tile));
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_[iteration_strided_ & 1];
int stride_idx = (iteration_strided_ & ~1);
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ / Layout::kFactor +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_strided_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous * Layout::kFactor +
coord.strided() * Shape::kStrided * stride_ *
Layout::kElementsPerAccess / Layout::kFactor);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
Crosswise>,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileAccessIterator<
Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
Crosswise>,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileAccessIterator<Shape_, Element_,
layout::TensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
static int const kCrosswise = Crosswise;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(!(ThreadMap::Delta::kContiguous % kCrosswise),
"kCrosswise is the smallest unit in the contiguous dimension "
"for shared memory swizzling.");
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
/// Number of pointers
///
    /// Note: TN kblock32 layouts only need 1 pointer, but strangely
    /// reducing the pointer count hurts performance
static int const kPointerCount =
(ThreadMap::Iterations::kStrided > 1 ? 2 : 1);
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
  /// Total number of sections. The memory is divided into stages. One stage
  /// can store one tile. A stage is divided into sections. Interleaved layouts
  /// can have multiple sections in a stage; other layouts have only one section
  /// per stage.
int sections_;
/// Sections that a stage has
int sections_per_stage_;
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_[Detail::kPointerCount];
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: sections_(ref.stride(0) / kCrosswise),
sections_per_stage_(Shape::kContiguous / kCrosswise),
        // stride_ (in units of AccessType) = kCrosswise * sections_ * kFactor / kElementsPerAccess
stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base =
ThreadMap::initial_offset(thread_id);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
thread_offset_base +
layout::PitchLinearCoord{
0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i};
// initialize pointer
pointer_[i] = reinterpret_cast<AccessType *>(ref.data()) +
ref.offset(thread_offset_in_threadblock_tile) /
Layout::kElementsPerAccess;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_[iteration_strided_ & 1];
int stride_idx = (iteration_strided_ & ~1);
int access_offset =
stride_idx * ThreadMap::Delta::kStrided * stride_ / Layout::kFactor +
// kCrosswise elements in the contiguous dimension would span to a
// shared memory cache line.
iteration_contiguous_ * (ThreadMap::Delta::kContiguous / kCrosswise) *
Layout::TileShape::kContiguous;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_strided_ == ThreadMap::Iteration::kStrided)
// which means we enter the next section.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * sections_per_stage_ * stride_ *
ThreadMap::kElementsPerAccess / sections_ +
coord.strided() * Shape::kStrided * stride_ *
Layout::kElementsPerAccess / Layout::kFactor);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h/0 | {
"file_path": "include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h",
"repo_id": "include",
"token_count": 9771
} | 40 |
# Getting Started With CuTe
CuTe is a collection of C++ CUDA template abstractions for defining and operating on hierarchically multidimensional layouts of threads and data. CuTe provides `Layout` and `Tensor` objects that compactly package the type, shape, memory space, and layout of data, while performing the complicated indexing for the user. This lets programmers focus on the logical descriptions of their algorithms while CuTe does the mechanical bookkeeping for them. With these tools, we can quickly design, implement, and modify all dense linear algebra operations.
The core abstractions of CuTe are hierarchically multidimensional layouts, which can be composed with data arrays to represent tensors. The representation of layouts is powerful enough to represent nearly everything we need to implement efficient dense linear algebra. Layouts can also be combined and manipulated via functional composition, on which we build a large set of common operations such as tiling and partitioning.
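For example, here is a minimal sketch (not taken from this documentation; the shape, strides, and buffer are illustrative only) of composing a `Layout` with a raw pointer to form a `Tensor`:
```c++
#include <cute/tensor.hpp>

using namespace cute;

float data[32];

// A 4 x 8 column-major layout: shape (4,8) with strides (1,4)
auto layout = make_layout(make_shape(Int<4>{}, Int<8>{}),
                          make_stride(Int<1>{}, Int<4>{}));

// A Tensor packages the pointer together with the layout and performs the indexing
auto t = make_tensor(&data[0], layout);
float x = t(2, 3);   // element at logical coordinate (2,3)
```
The same `Layout` vocabulary is reused throughout CuTe to describe arrangements of threads as well as of data.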
## System Requirements
CuTe shares CUTLASS 3.x's software requirements,
including NVCC with a C++17 host compiler.
## Knowledge prerequisites
CuTe is a CUDA C++ header-only library. It requires C++17
(the revision of the C++ Standard that was released in 2017).
Throughout this tutorial, we assume intermediate C++ experience.
For example, we assume that readers know
how to read and write templated functions and classes, and
how to use the `auto` keyword to deduce a function's return type.
We will be gentle with C++ and explain some things
that you might already know.
We also assume intermediate CUDA experience.
For example, readers must know
the difference between device and host code,
and how to launch kernels.
## Building Tests and Examples
CuTe's tests and examples build and run as part of CUTLASS's normal build process.
CuTe's unit tests live in the [`test/unit/cute`](../../../test/unit/cute) subdirectory.
CuTe's examples live in the [`examples/cute`](../../../examples/cute) subdirectory.
## Library Organization
CuTe is a header-only C++ library, so there is no source code that needs building. Library headers are contained within the top level [`include/cute`](../../../include/cute) directory, with components of the library grouped by directories that represent their semantics.
| Directory | Contents |
|------------------------|------------------------|
| [`include/cute`](../../../include/cute) | Each header in the top level corresponds to one of the fundamental building blocks of CuTe, such as [`Layout`](../../../include/cute/layout.hpp) and [`Tensor`](../../../include/cute/tensor.hpp). |
| [`include/cute/container`](../../../include/cute/container) | Implementations of STL-like objects, such as tuple, array, and aligned array. |
| [`include/cute/numeric`](../../../include/cute/numeric) | Fundamental numeric data types that include nonstandard floating-point types, nonstandard integer types, complex numbers, and integer sequence. |
| [`include/cute/algorithm`](../../../include/cute/algorithm) | Implementations of utility algorithms such as copy, fill, and clear that automatically leverage architecture-specific features if available. |
| [`include/cute/arch`](../../../include/cute/arch) | Wrappers for architecture-specific matrix-matrix multiply and copy instructions. |
| [`include/cute/atom`](../../../include/cute/atom) | Meta-information for instructions in `arch` and utilities like partitioning and tiling. |
## Tutorial
This directory contains a CuTe tutorial in Markdown format.
The file
[`0x_gemm_tutorial.md`](./0x_gemm_tutorial.md)
explains how to implement dense matrix-matrix multiply using CuTe components.
It gives a broad overview of CuTe and thus would be a good place to start.
Other files in this directory discuss specific parts of CuTe.
* [`01_layout.md`](./01_layout.md) describes `Layout`, CuTe's core abstraction.
* [`02_layout_algebra.md`](./02_layout_algebra.md) describes more advanced `Layout` operations and the CuTe layout algebra.
* [`03_tensor.md`](./03_tensor.md) describes `Tensor`,
a multidimensional array abstraction which composes `Layout`
with an array of data.
* [`04_algorithms.md`](./04_algorithms.md) summarizes CuTe's
generic algorithms that operate on `Tensor`s.
* [`0t_mma_atom.md`](./0t_mma_atom.md) demonstrates CuTe's meta-information and interface to our GPUs'
architecture-specific Matrix Multiply-Accumulate (MMA) instructions.
* [`0x_gemm_tutorial.md`](./0x_gemm_tutorial.md) walks through building a GEMM from scratch using CuTe.
* [`0y_predication.md`](./0y_predication.md) explains what to do
if a tiling doesn't fit evenly into a matrix.
* [`0z_tma_tensors.md`](./0z_tma_tensors.md) explains an advanced `Tensor` type that CuTe uses to support TMA loads and stores.
## Quick Tips
### How do I print CuTe objects on host or device?
The `cute::print` function has overloads for almost all CuTe types, including Pointers, Integers, Strides, Shapes, Layouts, and Tensors. When in doubt, try calling `print` on it.
CuTe's print functions work on either host or device.
Note that printing on device is expensive.
Even leaving print code in place on device,
where it is never called
(e.g., printing in an `if` branch that is not taken at run time),
may generate slower code.
Thus, be sure to remove device-side print code after debugging.
You might also only want to print on thread 0 of each threadblock, or threadblock 0 of the grid. The `thread0()` function returns true only for global thread 0 of the kernel, that is, for thread 0 of threadblock 0. A common idiom for printing CuTe objects is to print only on global thread 0.
```c++
if (thread0()) {
print(some_cute_object);
}
```
Some algorithms depend on some thread or threadblock,
so you may need to print on threads or threadblocks other than zero.
The header file
[`cute/util/debug.hpp`](../../../include/cute/util/debug.hpp),
among other utilities,
includes the function `bool thread(int tid, int bid)`
that returns `true` if running on thread `tid` and threadblock `bid`.
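For instance, a hedged example (the thread index, block index, and the object being printed are placeholders):
```c++
#include <cute/util/debug.hpp>

if (cute::thread(7, 3)) {        // true only on thread 7 of threadblock 3
  cute::print(some_cute_object); // `some_cute_object` stands in for whatever you are inspecting
}
```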
#### Other output formats
Some CuTe types have special printing functions that use a different output format.
The `cute::print_layout` function will display any rank-2 layout in a plain text table. This is excellent for visualizing the map from coordinates to indices.
The `cute::print_tensor` function will display any rank-1, rank-2, rank-3, or rank-4 tensor in a plain text multidimensional table. The values of the tensor are printed so you can verify the tile of data is what you expect after a copy, for example.
The `cute::print_latex` function will print LaTeX commands that you can use to build nicely formatted and colored tables via `pdflatex`. This works for `Layout`, `TiledCopy`, and `TiledMMA`, and can be very useful for getting a sense of the layout and partitioning patterns within CuTe.
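As a small illustration (the layout below is invented for this example), `print_layout` can be called on any rank-2 layout:
```c++
#include <cute/tensor.hpp>

using namespace cute;

// 4 x 8 layout with row-major strides (8,1)
auto layout = make_layout(make_shape(Int<4>{}, Int<8>{}),
                          make_stride(Int<8>{}, Int<1>{}));
print_layout(layout);   // prints the coordinate -> index mapping as a plain text table
```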
| media/docs/cute/00_quickstart.md/0 | {
"file_path": "media/docs/cute/00_quickstart.md",
"repo_id": "media",
"token_count": 1891
} | 41 |
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS GEMM API")
[README](../../README.md#documentation) > **CUTLASS 3.0 GEMM API**
# CUTLASS 3.0 GEMM API
CUTLASS presents a uniform programming model
for matrix multiply-accumulate (MMA) operations
at different levels of the GPU system hierarchy.
CUTLASS 3.0 has GEMM APIs corresponding to the following levels
in order of highest to the lowest level.
1. Device
2. Kernel
3. Collective
4. Tiled MMA and Copy
5. Atom
This document will cover the first three levels in detail:
Device, Kernel, and Collective.
It also briefly discusses the Tiled MMA/Copy and Atom level,
and then refers readers to CuTe's tutorial for more information.
# CUTLASS GEMM Model
CUTLASS implements algorithms that express
the classical "triply nested loop" GEMM algorithm
with a tiled structure mirroring the above hierarchy.
The following pseudocode describes the model for a GEMM kernel
targeting a warp-synchronous matrix multiply instruction like `mma.sync`.
The entire operation is referred to as "Gemm,"
as it is assumed that an epilogue operation
performs the general matrix update similar to BLAS.
This is pseudocode and is only meant to illustrate which parts of the layers
correspond to the inner or outer loops of the GEMM.
```c++
// cutlass::gemm::kernel::GemmUniversal: ClusterTileM and ClusterTileN loops
// are either rasterized by the hardware or scheduled by the kernel in persistent kernels.
// Parallelism over thread block clusters
for (int cluster_m = 0; cluster_m < GemmM; cluster_m += ClusterTileM) {
for (int cluster_n = 0; cluster_n < GemmN; cluster_n += ClusterTileN) {
// cutlass::gemm::collective::CollectiveMma: mainloop that iterates over all k-tiles
// No loop unrolling is performed at this stage
for (int k_tile = 0; k_tile < size<2>(gmem_tensor_A); k_tile++) {
// loops inside cute::gemm(tiled_mma, a, b, c); Dispatch 5: (V,M,K) x (V,N,K) => (V,M,N)
// TiledMma uses the hardware instruction provided through its Mma_Atom
// TiledMma's atom layout, value layout, and permutations define the iteration order
for (int tiled_mma_k = 0; tiled_mma_k < size<2>(A); tiled_mma_k++) {
for (int tiled_mma_m = 0; tiled_mma_m < size<1>(A); tiled_mma_m++) {
for (int tiled_mma_n = 0; tiled_mma_n < size<1>(B); tiled_mma_n++) {
// TiledMma's vector mode dispatches to the underlying instruction.
mma.call(d, a, b, c);
} // tiled_mma_n
} // tiled_mma_m
} // tiled_mma_k
} // k_tile mainloop
  } // cluster_n
} // cluster_m
```
The first two nested `for` loops
correspond to parallelism over thread block clusters.
The code does not actually express them as explicit `for` loops.
Instead, the parallelization scheme over tiles
is implied by CUDA grid launch semantics.
However, for persistent kernels,
these loops are expressed in the source code
as a single `while` loop that queries the
[work tile scheduler](/include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp)
for problem tiles on which to compute.
Inside these nested `for` loops,
one finds code that pulls matrix tiles
from global memory into more "local" memory
(like shared memory or registers)
and computes MMAs.
These tiled copy and tiled mma iterations are generally
fully static and get fully unrolled.
# CUTLASS GEMM Components
CUTLASS expresses the above loop nest
with the following components which are specialized for
data type, layout, and math instruction.
| API level | API Class and/or function names |
| --- | --- |
| Device | `cutlass::gemm::device::GemmUniversalAdapter` |
| Kernel | `cutlass::gemm::kernel::GemmUniversal` |
| Collective | `cutlass::gemm::collective::CollectiveMma` <br /> `cutlass::epilogue::collective::DefaultEpilogue` <br /> `cutlass::epilogue::collective::Epilogue` <br /> |
| Tiled (MMA and Copy) | `cute::TiledMma` and `cute::TiledCopy` <br /> `cute::gemm()` and `cute::copy()` |
| Atom | `cute::Mma_Atom` and `cute::Copy_Atom` |
In CUTLASS 3.0, we assemble kernels
by first composing a collective mainloop and collective epilogue
together at the kernel layer,
and then wrapping them with a host-side adapter
to form a GEMM handle to that kernel.
The following sections describe these components
in the order a user should instantiate them
in order to assemble a kernel. This order is
1. assemble the required collective mainloop and epilogues,
2. compose them together to build a kernel type, and
3. wrap up the kernel with a device layer adapter.
This order is also reflected in the [CUTLASS 3.0 Hopper kernel examples](/examples/48_hopper_warp_specialized_gemm) as seen in the excerpt below.
```c++
// Step 1: Generate the required collective layer mainloop specialization
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
ArchTag, OperatorClass,
ElementA, LayoutA, AlignmentA,
ElementB, LayoutB, AlignmentB,
ElementAccumulator,
    TileShape, ClusterShape,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
// Step 2: Specify the collective layer epilogue type
using CollectiveEpilogue = cutlass::epilogue::collective::DefaultEpilogue<
cutlass::gemm::TagToStrideC_t<LayoutC>,
cutlass::gemm::TagToStrideC_t<LayoutC>,
cutlass::epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>>;
// Step 3: Compose the mainloop and epilogue together at the kernel layer
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
cute::Shape<int,int,int,int>, // ProblemShape [M,N,K,L]
CollectiveMainloop,
CollectiveEpilogue
>;
// Step 4: Wrap up the kernel::GemmUniversal kernel class
// with the device adapter to obtain a host-side handle to the kernel
using GemmHandle = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
```
Towards the end, we also briefly cover CuTe's tiled mma and copy as well as the atom layer APIs,
before redirecting users to CuTe-specific documentation for further details.
## Collective API
A Collective is "the largest collection of threads
onto which mma atoms and copy atoms are tiled."
That is, it is the largest number of threads in a grid
that can cooperate by leveraging hardware features
for accelerated communication and synchronization.
These hardware features include
* asynchronous array copy
(e.g., from global memory to shared memory);
* MMA instructions
for small tiles that live in shared memory;
* synchronization operations for clusters,
thread blocks, and/or warps; and/or
* hardware acceleration (such as barriers)
for ensuring that data dependencies
between asynchronous operations are met.
A Collective uses the `TiledMma` and `TiledCopy` API (see below)
to access operations that copy and perform MMA on tiles.
Different units of parallelism
(e.g., threads, warps, or thread blocks)
in a Collective might have different roles.
For example, in "warp-specialized" algorithms,
some warps may be responsible for copying data,
while others may be responsible for computation.
Nevertheless, the different units of parallelism
still need to share data and coordinate access
to the shared data. For example,
the producer warps in a warp-specialized algorithm
that copy input matrix tiles into shared memory
need to let the consumer MMA warp(s) know
that their MMA inputs are ready.
We contrast this with the `kernel::` layer API,
which schedules the collectives over *independent* tiles in the grid.
The Collective API includes both the "mainloop"
of matrix multiply-accumulate, and the epilogue.
This API is the composition point for optimizations
such as mainloop fusions and epilogue fusions.
It is responsible for implementing
the `k_tile` loop in the above triply nested loop pseudocode.
### Collective Mainloops
The `cutlass::gemm::collective::CollectiveMma` class
is the primary interface to the collective
matrix multiply-accumulate (MMA) mainloops.
"Mainloop" refers to the "main loop" over tiles --
the "cluster tile k" loop in the pseudocode
near the top of this document.
Any looping over multiple tiles that
the algorithm might need to do would happen here.
The `CollectiveMma` class is declared in the header
[cutlass/gemm/collective/collective_mma.hpp](/include/cutlass/gemm/collective/collective_mma.hpp).
```c++
namespace cutlass::gemm::collective {
template <
class DispatchPolicy,
class TileShape,
class ElementA,
class StrideA,
class ElementB,
class StrideB,
class TiledMma,
class GmemTiledCopyA,
class SmemLayoutAtomA,
class SmemCopyAtomA,
class TransformA,
class GmemTiledCopyB,
class SmemLayoutAtomB,
class SmemCopyAtomB,
class TransformB
>
struct CollectiveMma {
static_assert(sizeof(ElementA) == 0, "Could not find a mainloop specialization.");
};
} // namespace cutlass::gemm::collective
```
- `DispatchPolicy` is the most important type for a collective, and is
[covered in more detail below](#collective-dispatch-policies).
- `StrideA` and `StrideB` are instances of type `cute::Stride` that represent the global memory layout of A and B tensors. These strides are required to be rank-3, representing the modes `[outer, inner, batch]`. Each of the 3 ranks can be a multi-modal hierarchical stride; this would apply if implementing a tensor contraction.
- `TiledMma` is an instance of `cute::TiledMma`.
- `GmemTiledCopyA` and `GmemTiledCopyB` are instances of `cute::TiledCopy` types. Both tiled operation types are [covered in more detail below](#tiled-mma-and-copy).
- `SmemLayoutAtomA` and `SmemLayoutAtomB` are instances of type `cute::Layout` and represent the smallest
layout that will get tiled over the entire collective's shared memory. This layout does _not_ include the
pipeline mode, and therefore, both are expected to be rank 2 layouts of shape [`outer`, `inner`].
- `SmemCopyAtomA` and `SmemCopyAtomB` are `Copy_Atom`s to be used for moving data from shared memory
into register memory.
Notice that CUTLASS 3.0 mainloops do not accept a dedicated accumulator element type.
We obtain the accumulator type from the `typename TiledMma::ValTypeC`. Note also that
top level API's `ElementA` and `ElementB` can differ from those of the MMA facing
`typename TiledMma::ValTypeA` and `typename TiledMma::ValTypeB`, allowing TMA or user
supplied transform operations to perform type conversions.
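As a hedged illustration of this point (the alias names on the left are our own, not part of the interface):
```c++
// Inside a CollectiveMma, the accumulator and the MMA-facing operand types
// are read off the TiledMma rather than passed as separate template parameters.
using ElementAccumulator = typename TiledMma::ValTypeC;
using MmaElementA        = typename TiledMma::ValTypeA;  // may differ from the top-level ElementA
using MmaElementB        = typename TiledMma::ValTypeB;  // may differ from the top-level ElementB
```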
### Collective Dispatch Policies
`CollectiveMma` implementations are not generic.
Instead, they must be specialized for each algorithm and GPU architecture.
Users can dispatch to a `CollectiveMma` specialization
by picking template arguments matching that specialization.
CUTLASS 3.0 adopts a tag-based dispatch policy type to specialize
mainloop implementations and add tuning knobs to them.
Below is an example of one of the dispatch policies that is used to dispatch to a Hopper TMA
warp-specialized mainloop implementation:
```c++
// n-buffer in smem (Hopper TMA),
// pipelined with Hopper GMMA and TMA,
// warp-specialized dynamic schedule
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelTmaWarpSpecializedCooperative
>
struct MainloopSm90TmaGmmaWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
};
```
The `Stages_` template parameter lets the user freely vary the number of pipeline stages,
while the `ClusterShape_` type allows for parameterization over the shape of the threadblock
cluster over which TMA multicast will take place.
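For instance, a hedged sketch of instantiating this policy (the stage count, cluster shape, and schedule below are illustrative choices, not a recommendation):
```c++
using DispatchPolicy = cutlass::gemm::MainloopSm90TmaGmmaWarpSpecialized<
    4,                                                   // Stages_: 4 pipeline stages in shared memory
    cute::Shape<cute::_2, cute::_1, cute::_1>,           // ClusterShape_: 2x1x1 threadblock cluster
    cutlass::gemm::KernelTmaWarpSpecializedCooperative   // KernelSchedule
>;
```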
The collective dispatch policy is also the primary point of composing various kernel schedules
freely with any mainloop. Each mainloop policy either prescribes a `Schedule` with which
it needs to be run, or exposes a template API that lets the user pick a subset of the following schedules:
```c++
struct KernelCpAsyncWarpSpecialized { };
struct KernelCpAsyncWarpSpecializedPingpong { };
struct KernelCpAsyncWarpSpecializedCooperative { };
struct KernelTma { };
struct KernelTmaWarpSpecialized { };
struct KernelTmaWarpSpecializedPingpong { };
struct KernelTmaWarpSpecializedCooperative { };
```
- A single kernel schedule can support multiple mainloop implementations. For example,
`KernelMultistage` can be composed with many different mainloop implementations across GPU
architectures such as `MainloopSm70TwoStage`, `MainloopSm80CpAsyncUnpredicated`, and many more.
- A single mainloop can be composed with multiple
possible kernel schedules. For example, the `MainloopSm90TmaGmmaWarpSpecialized` can be
composed with any of the `KernelTmaWarpSpecialized`, `KernelTmaWarpSpecializedPingpong` or `KernelTmaWarpSpecializedCooperative`
kernel schedules.
As [discussed in the CUTLASS 3.0 design documentation](cutlass_3x_design.md), adopting tag
dispatch policies for our core vocabulary types allows us to maintain a single type name for
all operations that conceptually belong to the same class. This design has the following benefits.
- It *avoids code duplication* in cases where mainloops can be composed with multiple kernels or vice versa.
- It *makes writing generic code easier*, as the primary type name `CollectiveMma` does not change across any implementation.
- It *provides a clear, singular extension point* for users to plug in new, custom mainloops implementations specialized on their own dispatch policies.
### Collective Builder for `CollectiveMma`s
The primary `CollectiveMma` is intended to be an expert user interface that allows full control over
all the properties of the collective's GPU micro-kernel. However, often a user just wants an
off-the-shelf GEMM mainloop implementation parameterized on simple configuration parameters. CUTLASS 3.0
provides [`cutlass::gemm::collective::CollectiveBuilder`](/include/cutlass/gemm/collective/collective_builder.hpp) for such scenarios.
```c++
namespace cutlass::gemm::collective {
template <
class ArchTag,
class OpClass,
class ElementA,
class GmemLayoutA,
int AlignmentA,
class ElementB,
class GmemLayoutB,
int AlignmentB,
class ElementAccumulator,
class TileShape_MNK,
class ClusterShape_MNK,
class StageCountType,
class KernelScheduleType,
class Enable = void
>
struct CollectiveBuilder {
static_assert(sizeof(ElementA) == 0, "Could not build a collective for given parameters.");
};
} // namespace cutlass::gemm::collective
```
`CollectiveBuilder` accepts CUTLASS 2.x equivalent input template arguments, and attempts to build
the best performing `CollectiveMma` from the given parameters.
- `ArchTag` is one of the SM architectures tags from `cutlass::arch::Sm*`.
- `OpClass` is one of the operator class tags from `cutlass::arch::OpClass*`.
- `ElementA` and `ElementB` are the logical value types of the A resp. B tensors.
- `ElementAccumulator` is the accumulator type to be used in the instruction.
- `GmemLayoutA` and `GmemLayoutB` are CUTLASS 2.x layout tags, `layout::RowMajor` or `layout::ColumnMajor`.
- `AlignmentA` and `AlignmentB` are global memory alignments of A and B tensors in terms of element count.
- `TileShape_MNK` is an instance of `cute::Shape` that is rank-3, representing the MxNxK collective tile shape.
- `ClusterShape_MNK` is an instance of `cute::Shape` that is rank-3, representing the MxNxK threadblock cluster tile shape.
- `StageCountType` is either `collective::StageCountAuto` or an instance of `collective::StageCount<N>`.
- `KernelScheduleType` is either `collective::KernelScheduleAuto` or one of the specific kernel schedule tags discussed in the [dispatch policy section](#collective-dispatch-policies) above.
`StageCountAuto` allows the collective builder to compute a single stage's size in shared memory
and maximize the shared memory usage assuming 1 threadblock / multiprocessor occupancy.
`KernelScheduleAuto` allows the collective builder to pick the best kernel schedule available for the
given set of parameters, or lets the user override this with a specific kernel schedule type.
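For example, a hedged sketch of overriding both automatic choices with an explicit stage count and kernel schedule (the element types, alignments, and tile shapes below are illustrative):
```c++
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
    cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
    cutlass::half_t, cutlass::layout::RowMajor,    8,    // A: type, layout, alignment
    cutlass::half_t, cutlass::layout::ColumnMajor, 8,    // B: type, layout, alignment
    float,                                               // accumulator
    cute::Shape<cute::_128, cute::_128, cute::_64>,      // TileShape_MNK
    cute::Shape<cute::_1,   cute::_2,   cute::_1>,       // ClusterShape_MNK
    cutlass::gemm::collective::StageCount<3>,            // explicit stage count
    cutlass::gemm::KernelTmaWarpSpecializedCooperative   // explicit kernel schedule
>::CollectiveOp;
```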
Note that collective builders are still in beta, and their functionality
does not map onto the full design space that the primary expert `CollectiveMma` API
allows for. We expect their supported mainloop types to expand in future releases, but
with 3.0, only SM90 tensorop kernels are supported through the builder API. The builder API
may also change in the future as we adopt user feedback.
If the builder is able to provide a collective mainloop type for the given set of parameters,
it will be aliased within as `CollectiveOp`. For more information on how to
parameterize kernels conveniently with the collective builder, please see example [49_hopper_gemm_with_collective_builder](/examples/49_hopper_gemm_with_collective_builder).
### Epilogue
The collective epilogue implements element-wise operations
involving the output matrix. Users can provide a custom
epilogue, or use one of the standard epilogues.
These live in the directory
[include/cutlass/epilogue/collective/](/include/cutlass/epilogue/collective/),
and include classes like
`cutlass::epilogue::collective::DefaultEpilogue`
and
`cutlass::epilogue::collective::Epilogue`.
CUTLASS's provided collective epilogues
do not live under `include/cutlass/gemm`
or in the `cutlass::gemm` namespace,
because they can be used for computations
other than GEMM.
## Kernel API
The kernel is "a collection of all clusters in the grid."
The kernel layer schedules have four main responsibilities.
- Ordering the execution of collectives within the kernel, performing any synchronization between them that may be necessary
- Marshalling the threads of warp-specialized schedules into their respective roles
- Performing any necessary grid swizzling logic
- Tiling the input tensors with the threadblock cluster value tile before invoking the collectives on them
The Kernel API is the entry point for a grid of thread blocks
that may or may not be organized in a cluster.
It is the composition point for fusing back-to-back GEMMs,
epilogues, and/or other operations.
The entry point API for CUTLASS 3.0 kernel is the class
`cutlass::gemm::kernel::GemmUniversal`, found in the header file
[include/cutlass/gemm/kernel/gemm_universal.hpp](/include/cutlass/gemm/kernel/gemm_universal.hpp).
`GemmUniversal` is a stateless universal device kernel
that implements GEMM as the composition of two parts:
* a collective mainloop, and
* a collective epilogue
```cpp
namespace cutlass::gemm::kernel {
/*
* Stateless universal device GEMM kernel type that treats GEMM as
* a composition of a collective mainloop and a collective epilogue.
*
* Supports both the 2.x and 3.x APIs based on whether the first type is
* a cute::tuple<> or not.
* 2.x API implementation: cutlass/gemm/kernel/gemm_universal.h
* 3.x API implementation: cutlass/gemm/kernel/gemm_*.hpp
*
* In the following declaration, the name preceding the 'Or' refers to
* 3.x API type argument order, and the name succeeding the 'Or' refers to
* 2.x API type argument order. Template arguments without two names
* belong to the 3.x API only.
**/
template <
class ProblemShapeOrThreadblockMma_, // (m, n, k) or (m, n, k, l)
class CollectiveMainloopOrEpilogue_,
class CollectiveEpilogueOrThreadblockSwizzle_,
class TileScheduler_ = void,
class Enable = void
>
class GemmUniversal;
} // namespace cutlass::gemm::kernel
```
*Stateless* means that the caller --
for example, the Device API described below --
manages the kernel's state.
The kernel just takes input and output parameters (`Params`).
*Universal* means that `GemmUniversal` works
for both CUTLASS 3.0 and 2.x interfaces
and across a broad range of kernel schedules.
If `GemmUniversal`'s first template argument is a `cute::Shape`,
then `GemmUniversal` assumes that the remaining template arguments
implement the 3.0 APIs. Otherwise, `GemmUniversal` assumes that
the remaining template arguments implement the 2.x APIs.
Starting with CUTLASS 3.0, the problem shape has been promoted
to a top-level template API for the GEMM kernel.
This supports fully static GEMM instantiations
where the user expects to know some or all
of the problem shapes at compile time
in order to extract even more performance.
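A hedged illustration (the value 4096 is arbitrary): part of the problem shape can be made a compile-time constant while the rest remains dynamic.
```c++
// (M, N, K, L) with M, N, and L dynamic and K fixed at compile time
using ProblemShape = cute::Shape<int, int, cute::Int<4096>, int>;
```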
The *collective mainloop* implements MMA on local tiles.
The *collective epilogue* addresses any operations after the MMA,
such as applying the `beta * C` part of `C := beta * C + alpha * A * B`.
We will explain *collective* in more detail below.
Specializations of `kernel::GemmUniversal` for 3.0 APIs live in
any of various `gemm_*.hpp` files in the directory
[include/cutlass/gemm/kernel/](/include/cutlass/gemm/kernel/).
Specializations for 2.x APIs can be found in the header file
[include/cutlass/gemm/kernel/gemm_universal.h](/include/cutlass/gemm/kernel/gemm_universal.h).
CUTLASS 3.x implements various embodiments of `kernel::GemmUniversal`.
Each kernel layer schedule is specialized
for a GEMM scheduling algorithm and GPU architecture.
Specializations of `kernel::GemmUniversal` for 3.0 APIs live in
any of various `include/cutlass/gemm/kernel/{arch_tag}*.hpp` files in the directory
[include/cutlass/gemm/kernel/](/include/cutlass/gemm/kernel/).
Which specialization to dispatch to is decided through the dispatch policy's `Schedule` type.
For example, the header file
[include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp](/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp)
has a specialization of `kernel::GemmUniversal` for Hopper
that uses a warp-specialized mainloop with a persistent scheduling algorithm,
while the header file
[include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp](/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp)
has a specialization of `GemmUniversal` for Hopper
that uses a warp-specialized but non-persistent algorithm.
To support composition between supported kernel schedules and mainloop dispatch policies without having to
duplicate collective mainloop implementations, GEMM kernel layer schedules can be composed with
any mainloop that specifies their corresponding kernel schedule as their `Schedule` type in the policy.
This is discussed in detail in the [collective dispatch policy section](#collective-dispatch-policies) above.
```c++
// An example of the SM90 KernelMultistage kernel's
// specialization logic that allows it to be composed
// with many mainloops such as `MainloopSm80CpAsync`
// and `MainloopSm70TwoStage`.
template <
class ProblemShape_,
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileScheduler_
>
class GemmUniversal<
ProblemShape_,
CollectiveMainloop_,
CollectiveEpilogue_,
TileScheduler_,
std::enable_if_t<std::is_base_of_v<KernelMultistage, typename CollectiveMainloop_::DispatchPolicy::Schedule>>>
```
## Device API
The Device API is a universal, kernel-agnostic host interface
for kernel launch and managing the lifetime of
reusable host-side parameters.
This API is how users' host-side .cu code
invokes CUTLASS's single-GPU GEMM kernels.
It serves the same purpose as cuBLAS and behaves similarly.
The entry point for the Device GEMM API is the class
`cutlass::gemm::device::GemmUniversalAdapter`.
This class lives in the header file
[include/cutlass/gemm/device/gemm_universal_adapter.h](/include/cutlass/gemm/device/gemm_universal_adapter.h).
`GemmUniversalAdapter` is a stateful, reusable handle,
which is parameterized on the `cutlass::gemm::kernel` type.
```c++
/*!
GemmUniversalAdapter is a stateful, reusable GEMM handle built around a kernel
of type cutlass::gemm::kernel::*
It manages the lifetime of the underlying `kernel::Params` struct, and exposes APIs
to create it from the host facing arguments. For power users, new static methods
are exposed in 3.x APIs that bypass the stateful methods or args->params lowering.
It supports kernel types that implement both the 2.x and 3.0 APIs,
however, this is done by specializing the implementation of GemmUniversalAdapter
on the two kernel API types, and thus, GemmUniversalAdapter's behavior might
differ between the two specializations.
*/
template <class GemmKernel_, class Enable = void>
class GemmUniversalAdapter;
```
*Stateful* means that the handle instance contains state
that the kernel needs to run.
This means that the user must initialize the handle first,
then use the initialized handle instance to run the kernel.
Statefulness also means that the handle can manage the lifetime
of the kernel's `Params` -- the parameters of the kernel itself.
An important duty of `GemmUniversalAdapter`
is to map from the user's `Arguments` --
what the user sees as the kernel's parameters --
to the `Params` that the kernel actually sees.
For power users, the class exposes new static methods
in 3.0 APIs that can bypass stateful methods
or go directly to `Params` without intermediate `Arguments`.
*Reusable* means that the handle instance can be used
to call the kernel multiple times with different arguments
(e.g., different matrices).
Reusing the handle may be more efficient than just
creating a new handle for each kernel invocation.
*Parameterized on the kernel type* means that
the `GemmUniversalAdapter` class' behavior
depends on the GEMM kernel type (see the next section).
Specifically, `GemmUniversalAdapter` has a template parameter
`GemmKernel`, which is the GEMM kernel type.
Valid template arguments for `GemmKernel` are
* `cutlass::gemm::kernel::GemmUniversal`,
implementing CUTLASS 3.x API kernels;
* `cutlass::gemm::kernel::GemmUniversal`,
implementing CUTLASS 2.x API kernels; or
* Any valid CUTLASS 2.x `kernel` layer GEMM that
was previously composable with the `device::GemmUniversalAdapter`.
`GemmUniversalAdapter` presents a single
host-side interface to both 3.0 and 2.x kernels.
CUTLASS accomplishes this by
specializing `GemmUniversalAdapter`'s implementation
on kernel layer GEMMs that implement either the 2.x API or the 3.x API.
The metafunction [`cutlass::gemm::detail::IsCutlass3GemmKernel`](cutlass_3x_backwards_compatibility.md#kernel-api-design-differences)
is what `GemmUniversalAdapter` uses to distinguish between 2.x and 3.x kernels.
`GemmUniversalAdapter` sets up and launches the kernel, using the
CUDA extended launch API for threadblock cluster support if required.
Note, `GemmUniversalAdapter` does *not* specify the grid shape.
The kernel controls the grid shape
and other kernel-specific launch parameters.
This makes it possible for all 3.0 kernels
to use the same kernel launch code,
thus factoring out kernel launch from the actual kernel.
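As an illustrative, simplified sketch of this flow -- not a complete program, and with `GemmKernel`
standing in for a fully specified `cutlass::gemm::kernel::GemmUniversal` type -- host code
typically builds the kernel's `Arguments`, queries and allocates workspace, initializes the handle,
and then runs it:
```c++
// Hedged sketch of the Device API flow; `GemmKernel` is assumed to be a
// fully specified cutlass::gemm::kernel::GemmUniversal instantiation.
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;

Gemm gemm_op;
typename Gemm::Arguments arguments; // populate with problem shape, pointers, strides, and epilogue arguments

// Verify the arguments are implementable, then allocate any required workspace.
if (gemm_op.can_implement(arguments) != cutlass::Status::kSuccess) { /* handle error */ }
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

// Initialize the reusable handle once, then launch (possibly many times).
gemm_op.initialize(arguments, workspace.get(), /* stream = */ nullptr);
cutlass::Status status = gemm_op.run();
```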
## Tiled MMA and Copy
A Tiled MMA or Tiled Copy is a tiling of MMA Atoms (resp. Copy Atoms)
across threads and data, with possible permutations applied to the
resulting tiling. This layer is most analogous to the warp-level
tiling of MMA instructions in CUTLASS 2.x. However, it views the tiling
from the perspective of all threads participating in the operation
and generalizes the concept to copy operations as well. The purpose
of this layer is to build composable GPU micro-kernels out of a plethora
of hardware-accelerated math and data-movement operations, each with its
own unit layout in threads and data. The tiled MMA and Copy types present
all these various hardware-accelerated CuTe Atoms with a single, consistent
API.
The resulting tiled operation acts as a single MMA or copy operation
that users can invoke in the "inner" loop
of the three-nested-loops pseudocode
at the top of this document using `cute::gemm()` or `cute::copy()`.
We call this API "tiled" because it constructs
larger operations out of the Atoms provided by CuTe,
as if fitting together individual tiles
to build a reusable component of a mosaic.
For example, CuTe might provide an MMA Atom
that users can call on a single warp,
for fixed M, N, and K dimensions.
CUTLASS can then use CuTe operations like `make_tiled_mma`
to turn this Atom into an operation
that works on an entire thread block,
for larger M, N, and K dimensions.
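As a hedged illustration (the specific Atom and the 2 x 2 warp arrangement below are just an example,
not a recommendation), tiling a single-warp Tensor Core Atom into a block-wide tiled MMA looks
roughly like this:
```c++
// Illustrative sketch only: tile one warp-wide SM80 16x8x16 FP16 MMA Atom
// across a 2x2x1 arrangement of warps, yielding a 128-thread tiled MMA.
using namespace cute;

auto tiled_mma = make_tiled_mma(
  SM80_16x8x16_F16F16F16F16_TN{},   // MMA Atom (one warp, fixed 16x8x16 shape)
  Layout<Shape<_2,_2,_1>>{});       // replicate the Atom over a 2x2x1 grid of warps

// After per-thread partitioning, the result is invoked with cute::gemm()
// in the mainloop, just as if it were a single MMA operation.
```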
## Atom API
An "Atom" is the smallest collection of threads and data
that must participate in the execution of a hardware-accelerated
math or copy operation.
An Atom is "atomic" (indivisible) not in the sense of
concurrent memory operations like `atomicAdd`
(which are "indivisible in time (causality)"),
but in the sense of indivisibility in "space" --
the number of values and the groups of parallel workers
that must participate in the operation together.
An Atom uses CuTe Layouts to express the required
dimensions and strides of its input and output arrays.
Generally these are fixed at compile time.
The Atom API wraps calls to actual hardware instructions
that accelerate MMA or copy operations.
Users can ask for GPU architecture-specific implementations,
or just pick generic implementations and rely on
whatever GPU architectures were enabled.
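For illustration only (the particular hardware operations named below are examples, not requirements),
declaring an MMA Atom or a Copy Atom in CuTe looks roughly like this:
```c++
// Hedged sketch: wrapping hardware instructions as CuTe Atoms.
using namespace cute;

// An MMA Atom wrapping a single Tensor Core instruction (one warp, fixed 16x8x16 shape).
using MmaAtom  = MMA_Atom<SM80_16x8x16_F16F16F16F16_TN>;

// A Copy Atom wrapping a 128-bit cp.async instruction operating on half_t data.
using CopyAtom = Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, half_t>;
```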
For more information about Atoms,
please refer to CuTe's tutorial, e.g., the sections on
* [algorithms](./cute/04_algorithms.md) like `gemm` and `copy`,
* [MMA Atoms](./cute/0t_mma_atom.md#cute-mma-atoms), and
* [a GEMM example](./cute/0x_gemm_tutorial.md).
# Copyright
Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| media/docs/gemm_api_3x.md/0 | {
"file_path": "media/docs/gemm_api_3x.md",
"repo_id": "media",
"token_count": 8924
} | 42 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from math import prod
from typing import Union
from cuda import cuda, cudart
import numpy as np
import cutlass
from cutlass.backend.frontend import CupyFrontend, NumpyFrontend, TorchFrontend
from cutlass.backend.memory_manager import DevicePtrWrapper
from cutlass.utils.datatypes import is_cupy_tensor, is_numpy_tensor, is_torch_tensor
class ArgumentBase:
"""
Base class for operation arguments
"""
def __init__(
self,
A: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
B: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
C: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
D: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
**kwargs,
) -> None:
# tensor_C can be interpreted as the bias with bias=True in keyword args
self.bias = kwargs.get("bias", False)
self.stream = kwargs.get("stream", cuda.CUstream(0))
# RMM buffers used to track tensor lifetime
self.buffers = {}
# Host tensor to copy the computed result back
self.host_tensors = {}
self.ptr_A = self.tensor_to_ptr(A, "A")
self.ptr_B = self.tensor_to_ptr(B, "B")
self.ptr_C = self.tensor_to_ptr(C, "C")
self.ptr_D = self.tensor_to_ptr(D, "D", is_output=True)
if C is not None:
if not isinstance(C, cuda.CUdeviceptr):
self.tensor_c_numel = prod(C.shape)
def tensor_to_ptr(self, tensor, name, is_output=False):
"""
Convert and remember the input tensor to cuda.CUdeviceptr used by cuda python
For numpy.ndarray, it also remembers the host buffer for synchronization
"""
if tensor is None:
return cuda.CUdeviceptr(0)
if is_numpy_tensor(tensor):
if is_output:
assert name
self.buffers[name] = NumpyFrontend.argument(tensor, is_output)
if is_output:
self.host_tensors[name] = tensor
return self.buffers[name].ptr
elif is_torch_tensor(tensor):
return TorchFrontend.argument(tensor)
elif isinstance(tensor, cuda.CUdeviceptr):
return tensor
elif is_cupy_tensor(tensor):
return CupyFrontend.argument(tensor)
else:
raise TypeError("Unsupported frontend: only numpy, torch, cupy, and cuda.CUdeviceptr inputs are supported")
def sync(self, stream_sync=True):
if stream_sync:
(err,) = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
for key in self.host_tensors.keys():
host_tensor = self.host_tensors[key]
(err,) = cuda.cuMemcpyDtoH(
host_tensor,
self.buffers[key].ptr,
host_tensor.size * host_tensor.itemsize,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
self.free()
def free(self):
"""
Frees allocated device-side memory
"""
# Free any device memory allocated manually
if not cutlass.use_rmm:
for name, buf in self.buffers.items():
if isinstance(buf, DevicePtrWrapper):
err, = cudart.cudaFree(buf.ptr)
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError(f"cudaFree failed with error {err}")
if hasattr(self, "workspace_buffer") and isinstance(self.workspace_buffer, DevicePtrWrapper):
err, = cudart.cudaFree(self.workspace_buffer.ptr)
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError(f"cudaFree failed with error {err}")
del self.workspace_buffer
| python/cutlass/backend/arguments.py/0 | {
"file_path": "python/cutlass/backend/arguments.py",
"repo_id": "python",
"token_count": 2310
} | 43 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Pass manager for DAG IR.
"""
from typing import Any
import networkx as nx
from cutlass.backend.evt.ir import DAGIR
from cutlass.backend.evt.passes.util import cc_map
class EVTPassBase:
"""
Base class for EVT Passes
"""
dependencies = []
def __init__(self, dag_ir: DAGIR) -> None:
self.dag_ir = dag_ir
self.cc = self.dag_ir.cc
def requires(self) -> None:
"""
This function will be called before the pass is run.
"""
pass
def call(self) -> None:
"""
The pass that is run through the self.dag_ir
"""
raise NotImplementedError(
f"call is not overridden in Pass {self.__class__.__name__}")
def ensures(self) -> None:
"""
This function will be called after the pass is run.
"""
pass
def __call__(self) -> Any:
self.requires()
self.call()
self.ensures()
def cc_specific_method(self, func):
"""
This enables defining function that behaves differently under different cc
The simplest example of using this function is the following
.. highlight:: python
.. code-block:: python
class ExamplePass(EVTPassBase):
def call(self):
# This automatically selects the smXX_func based on the current cc
self.cc_specific_method(self.func)()
# Interface func, can be empty
def func(self):
pass
# Sm90 specific func
def sm90_func(self):
# sm90 specific method
return
# Sm80 specific func
def sm80_func(self):
# sm80 specific method
return
"""
func_name = f"sm{cc_map[self.cc]}_{func.__name__}"
if hasattr(self, func_name):
return getattr(self, func_name)
else:
raise NotImplementedError(f"func {func.__name__} is not overwritten for Sm{self.cc}")
class EVTPassManager(nx.DiGraph):
"""
Topological-based Pass Manager.
Each registered pass has a list of dependencies. The pass manager organizes
the passes as a DAG and launch the compiler passes under topological order.
"""
def __init__(self, dag_ir: DAGIR, pass_list):
super().__init__()
self.dag_ir = dag_ir
for pass_cls in pass_list:
self.add_pass(pass_cls)
self.sorted_passes = self.schedule()
def get_callable(self, pass_name):
"""
Return the callable of the pass
"""
return self.nodes[pass_name]["callable"]
def add_pass(self, pass_cls):
"""
Add a pass to the pass manager
:param pass_cls: the class of pass
:type pass_cls: derived class of EVTPassBase
"""
name = pass_cls.__name__
pass_callable = pass_cls(self.dag_ir)
self.add_node(name, callable=pass_callable)
def schedule(self):
"""
Schedule the added passes under topological order
"""
# Add edges
for pass_name in self.nodes:
callable = self.get_callable(pass_name)
for dependency_cls in callable.dependencies:
self.add_edge(
dependency_cls.__name__,
type(callable).__name__)
# Topological sort
return list(nx.topological_sort(self))
def __call__(self) -> Any:
"""
Launch the registered passes
"""
for pass_name in self.sorted_passes:
callable = self.get_callable(pass_name)
callable()
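# Illustrative usage sketch (not part of the library): the pass class names below
# are hypothetical placeholders; any EVTPassBase subclasses that declare their
# `dependencies` would be scheduled the same way.
#
#   pass_manager = EVTPassManager(dag_ir, [MyShapePropagationPass, MyLayoutPass])
#   pass_manager()  # runs every registered pass in topological order of its dependencies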
| python/cutlass/backend/evt/passes/pass_manager.py/0 | {
"file_path": "python/cutlass/backend/evt/passes/pass_manager.py",
"repo_id": "python",
"token_count": 2156
} | 44 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Common utilities for emitting CUTLASS kernels
"""
import cutlass
# Strings used for printing information about the generation of emitted scripts
_AUTOGEN_STR = f"This file was automatically generated by the CUTLASS {cutlass.__version__} Python interface (https://github.com/nvidia/cutlass/python)"
_CSTYLE_AUTOGEN_COMMENT = f"""// {_AUTOGEN_STR}
"""
_PYSTYLE_AUTOGEN_COMMENT = f"""# {_AUTOGEN_STR}
"""
_CUTLASS_KERNEL_ARGS_2x = """
typename DeviceKernel::Arguments arguments {
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K}, // problem size
1,
{alpha, beta},
A, B, C, D,
0, 0, 0, 0, // batch strides
DeviceKernel::LayoutA::packed({M, K}).stride(0), // lda
DeviceKernel::LayoutB::packed({K, N}).stride(0), // ldb
DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldc
DeviceKernel::LayoutC::packed({M, N}).stride(0) // ldd
};
"""
_CUTLASS_KERNEL_ARGS_2x_STREAM_K = """
typename DeviceKernel::Arguments arguments {
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K}, // problem size
1,
{alpha, beta},
A, B, C, D,
0, 0, 0, 0, // batch strides
DeviceKernel::LayoutA::packed({M, K}).stride(0), // lda
DeviceKernel::LayoutB::packed({K, N}).stride(0), // ldb
DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldc
DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldd
-1 // avail_sms
};
"""
_CUTLASS_KERNEL_RUN_GEMM_2x = """
using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute;
cutlass::Status ${name}_kernel_run(int M, int N, int K,
const DeviceKernel::ElementA* A, const DeviceKernel::ElementB* B, const DeviceKernel::ElementC* C, DeviceKernel::ElementC* D,
ElementCompute alpha, ElementCompute beta) {
${args}
size_t workspace_size = DeviceKernel::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
DeviceKernel gemm_op;
cutlass::Status status = gemm_op.initialize(arguments,
workspace.get(),
nullptr); // CUDA stream
if (status != cutlass::Status::kSuccess) {
return status;
}
status = gemm_op();
return status;
}
"""
_CUTLASS_KERNEL_RUN_GEMM_3x = """
using StrideA = typename DeviceKernel::GemmKernel::StrideA;
using StrideB = typename DeviceKernel::GemmKernel::StrideB;
using StrideC = typename DeviceKernel::GemmKernel::StrideC;
using StrideD = typename DeviceKernel::GemmKernel::StrideD;
using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute;
cutlass::Status ${name}_kernel_run(
int M, int N, int K, int L,
const DeviceKernel::ElementA* A, const DeviceKernel::ElementB* B, const DeviceKernel::ElementC* C, DeviceKernel::ElementC* D,
ElementCompute alpha, ElementCompute beta, const cutlass::KernelHardwareInfo& hw_info) {
typename DeviceKernel::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K, L}, // problem size
{
A, // ptrA
cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L)), // stride A
B, // ptrB
cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L)), // stride B
},
{
{alpha, beta},
C, // ptrC
cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L)), // stride C
D, // ptrD
cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L)), // stride D
},
hw_info
};
size_t workspace_size = DeviceKernel::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
DeviceKernel gemm_op;
cutlass::Status status = gemm_op.run(arguments,
workspace.get(),
nullptr); // CUDA stream
return status;
}
"""
_CUTLASS_KERNEL_RUN_GROUPED_GEMM_2x = """
using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute;
int threadblock_count = DeviceKernel::sufficient();
cutlass::Status ${name}_kernel_run(int problem_count, cutlass::gemm::GemmCoord* problem_sizes,
DeviceKernel::ElementA** A, DeviceKernel::ElementB** B, DeviceKernel::ElementC** C, DeviceKernel::ElementC** D,
int64_t* lda, int64_t* ldb, int64_t* ldc, int64_t* ldd,
ElementCompute alpha, ElementCompute beta) {
typename DeviceKernel::Arguments arguments {
problem_sizes,
problem_count,
threadblock_count,
{alpha, beta},
A, B, C, D,
lda, ldb, ldc, ldd
};
size_t workspace_size = DeviceKernel::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
DeviceKernel gemm_op;
cutlass::Status status = gemm_op.initialize(arguments,
workspace.get(),
nullptr); // CUDA stream
if (status != cutlass::Status::kSuccess) {
return status;
}
status = gemm_op();
return status;
}
"""
_CUTLASS_KERNEL_RUN_CONV2D_2x = """
using UnderlyingKernel = typename DeviceKernel::UnderlyingKernel;
namespace {
using TensorRefA = typename UnderlyingKernel::TensorRefA;
using TensorRefB = typename UnderlyingKernel::TensorRefB;
using TensorRefC = typename UnderlyingKernel::TensorRefC;
using ElementCompute = typename UnderlyingKernel::EpilogueOutputOp::ElementCompute;
}
template<typename TensorRef, typename Element>
TensorRef get_tensor_ref(cutlass::Tensor4DCoord tensor_coord, Element* ptr){
cutlass::layout::TensorNHWC layout = cutlass::layout::TensorNHWC::packed(tensor_coord);
TensorRef tensor_ref(ptr, layout);
return tensor_ref;
}
cutlass::Status ${name}_kernel_run(cutlass::conv::Conv2dProblemSize* problem_size,
UnderlyingKernel::ElementA* A, UnderlyingKernel::ElementB* B,
UnderlyingKernel::ElementC* C, UnderlyingKernel::ElementC* D,
ElementCompute alpha, ElementCompute beta, std::string split_k_mode,
cudaStream_t stream, int device_id=0) {
// create the tensor references
cutlass::Tensor4DCoord tensor_coord_A = cutlass::conv::implicit_gemm_tensor_a_extent(
cutlass::conv::Operator::k${conv_kind_name}, *problem_size
);
cutlass::Tensor4DCoord tensor_coord_B = cutlass::conv::implicit_gemm_tensor_b_extent(
cutlass::conv::Operator::k${conv_kind_name}, *problem_size
);
cutlass::Tensor4DCoord tensor_coord_C = cutlass::conv::implicit_gemm_tensor_c_extent(
cutlass::conv::Operator::k${conv_kind_name}, *problem_size
);
TensorRefA tensor_ref_A = get_tensor_ref<TensorRefA, UnderlyingKernel::ElementA>(tensor_coord_A, A);
TensorRefB tensor_ref_B = get_tensor_ref<TensorRefB, UnderlyingKernel::ElementB>(tensor_coord_B, B);
TensorRefC tensor_ref_C = get_tensor_ref<TensorRefC, UnderlyingKernel::ElementC>(tensor_coord_C, C);
TensorRefC tensor_ref_D = get_tensor_ref<TensorRefC, UnderlyingKernel::ElementC>(tensor_coord_C, D);
cutlass::conv::SplitKMode mode;
if (split_k_mode == "serial") {
mode = cutlass::conv::SplitKMode::kSerial;
} else if (split_k_mode == "parallel") {
mode = cutlass::conv::SplitKMode::kParallel;
} else {
throw std::runtime_error("Invalid split_k_mode: " + split_k_mode);
}
typename DeviceKernel::Arguments arguments{
*problem_size,
tensor_ref_A,
tensor_ref_B,
tensor_ref_C,
tensor_ref_D,
{alpha, beta},
mode
};
DeviceKernel implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
void* workspace_ptr = device_memory_allocation(workspace_size, device_id);
cutlass::Status status = implicit_gemm_op.can_implement(arguments);
if (status != cutlass::Status::kSuccess) {
return status;
}
status = implicit_gemm_op.initialize(arguments, workspace_ptr, stream);
if (status != cutlass::Status::kSuccess) {
return status;
}
//
// Launch initialized CUTLASS kernel
//
status = implicit_gemm_op(stream);
return status;
}
"""
| python/cutlass/emit/common.py/0 | {
"file_path": "python/cutlass/emit/common.py",
"repo_id": "python",
"token_count": 4533
} | 45 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Profiler based on the cuda events
"""
import re
import subprocess
from cuda import cuda, cudart
import numpy as np
from cutlass import CUTLASS_PATH
from cutlass.backend.library import DataTypeSize
from cutlass.op.op import OperationBase
from cutlass.shape import GemmCoord
from cutlass.utils.datatypes import is_numpy_tensor
class GpuTimer:
def __init__(self) -> None:
self.events = [
cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)[1],
cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)[1],
]
def start(self, stream=cuda.CUstream(0)):
(err,) = cuda.cuEventRecord(self.events[0], stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
def stop(self, stream=cuda.CUstream(0)):
(err,) = cuda.cuEventRecord(self.events[1], stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
def stop_and_wait(self, stream=cuda.CUstream(0)):
self.stop(stream)
if stream:
(err,) = cuda.cuStreamSynchronize(stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
else:
(err,) = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
def duration(self, iterations=1):
err, duration = cuda.cuEventElapsedTime(self.events[0], self.events[1])
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
return duration / float(iterations)
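# Illustrative usage sketch (`launch_kernel` is a hypothetical placeholder for any
# work enqueued on the default stream); this mirrors how CUDAEventProfiler below
# uses the timer:
#
#   timer = GpuTimer()
#   timer.start()
#   for _ in range(iterations):
#       launch_kernel()
#   timer.stop_and_wait()
#   avg_ms = timer.duration(iterations)  # cuEventElapsedTime reports milliseconds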
class CUDAEventProfiler:
def __init__(self, op: OperationBase, warmup_iterations: int=500, iterations: int=500, *args, **kwargs) -> None:
self.arguments = op.run(*args, **kwargs)
self.operation = op.operation
self.warmup_iterations = warmup_iterations
self.iterations = iterations
self.timer = GpuTimer()
#
# Cutlass Python Interface Profiler
#
def __call__(self):
for _ in range(self.warmup_iterations):
self.operation.run(self.arguments)
self.timer.start()
for _ in range(self.iterations):
self.operation.run(self.arguments)
self.timer.stop_and_wait()
runtime = self.timer.duration(self.iterations)
return runtime
#
# CUTLASS Profiler
#
def run_cutlass_profiler(self):
alpha = 1.0
beta = 1.0
profiler_path = CUTLASS_PATH + "/build/tools/profiler/cutlass_profiler"
kernel_name = self.operation.procedural_name()
verification_providers = "device"
provider = "cutlass"
problem_size = self.arguments.problem_size
if "cutlass3x" in kernel_name:
# The cutlass3x generator only has column-major output, so profile the transposed problem
layout_name = self.operation.layout_name_3x()
if layout_name[-1] == "t":
new_layout_name = "n" * len(layout_name)  # rename every layout mode to column-major ("n") for the transposed kernel
problem_size = GemmCoord(problem_size.n, problem_size.m, problem_size.k)
kernel_name = kernel_name.replace(layout_name, new_layout_name)
batch_count = self.arguments.batch_count
cmd = f"{profiler_path} --kernels={kernel_name} --verification-providers={verification_providers} " \
f"--providers={provider} --m={problem_size.m()} --n={problem_size.n()} --k={problem_size.k()} " \
f"--batch_count={batch_count} --alpha={alpha} --beta={beta} "\
f"--warmup-iterations={self.warmup_iterations} --profiling-iterations={self.iterations}"
result = subprocess.getoutput(cmd)
m = re.search(r"Runtime:\s+(?P<runtime>\d+.\d+)", result)
runtime = float(m.group("runtime"))
m = re.search(r"Bytes:\s+(?P<bytes>\d+)", result)
bytes = int(m.group("bytes"))
m = re.search(r"FLOPs:\s+(?P<flops>\d+)", result)
flops = int(m.group("flops"))
# check if the problem size matches
assert bytes == self.bytes(problem_size, batch_count, beta)
assert flops == self.flops(problem_size, batch_count, beta)
return runtime
def bytes(self, problem_size, batch_count=1, beta=0.0):
m = problem_size.m()
n = problem_size.n()
k = problem_size.k()
bytes = (
(DataTypeSize[self.operation.A.element] * m // 8) * k
+ (DataTypeSize[self.operation.B.element] * n // 8) * k
+ (DataTypeSize[self.operation.C.element] * m // 8) * n
)
if beta != 0:
bytes += (DataTypeSize[self.operation.C.element] * m // 8) * n
bytes *= batch_count
return bytes
def flops(self, problem_size, batch_count=1, beta=0.0):
m = problem_size.m()
n = problem_size.n()
k = problem_size.k()
flops_ = (m * n * k) * 2 * batch_count
if beta != 0:
flops_ += m * n * batch_count * 2
return flops_
| python/cutlass/utils/profiler.py/0 | {
"file_path": "python/cutlass/utils/profiler.py",
"repo_id": "python",
"token_count": 2791
} | 46 |
/*
* language_data.js
* ~~~~~~~~~~~~~~~~
*
* This script contains the language-specific data used by searchtools.js,
* namely the list of stopwords, stemmer, scorer and splitter.
*
* :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
/* Non-minified version is copied as a separate JS file, is available */
/**
* Porter Stemmer
*/
var Stemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
}
| python/docs/_static/language_data.js/0 | {
"file_path": "python/docs/_static/language_data.js",
"repo_id": "python",
"token_count": 2592
} | 47 |
/* Highlighting utilities for Sphinx HTML documentation. */
"use strict";
const SPHINX_HIGHLIGHT_ENABLED = true
/**
* highlight a given string on a node by wrapping it in
* span elements with the given class name.
*/
const _highlight = (node, addItems, text, className) => {
if (node.nodeType === Node.TEXT_NODE) {
const val = node.nodeValue;
const parent = node.parentNode;
const pos = val.toLowerCase().indexOf(text);
if (
pos >= 0 &&
!parent.classList.contains(className) &&
!parent.classList.contains("nohighlight")
) {
let span;
const closestNode = parent.closest("body, svg, foreignObject");
const isInSVG = closestNode && closestNode.matches("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.classList.add(className);
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
parent.insertBefore(
span,
parent.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling
)
);
node.nodeValue = val.substr(0, pos);
if (isInSVG) {
const rect = document.createElementNS(
"http://www.w3.org/2000/svg",
"rect"
);
const bbox = parent.getBBox();
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute("class", className);
addItems.push({ parent: parent, target: rect });
}
}
} else if (node.matches && !node.matches("button, select, textarea")) {
node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
}
};
const _highlightText = (thisNode, text, className) => {
let addItems = [];
_highlight(thisNode, addItems, text, className);
addItems.forEach((obj) =>
obj.parent.insertAdjacentElement("beforebegin", obj.target)
);
};
/**
* Small JavaScript module for the documentation.
*/
const SphinxHighlight = {
/**
* highlight the search words provided in localstorage in the text
*/
highlightSearchWords: () => {
if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
// get and clear terms from localstorage
const url = new URL(window.location);
const highlight =
localStorage.getItem("sphinx_highlight_terms")
|| url.searchParams.get("highlight")
|| "";
localStorage.removeItem("sphinx_highlight_terms")
url.searchParams.delete("highlight");
window.history.replaceState({}, "", url);
// get individual terms from highlight string
const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
if (terms.length === 0) return; // nothing to do
// There should never be more than one element matching "div.body"
const divBody = document.querySelectorAll("div.body");
const body = divBody.length ? divBody[0] : document.querySelector("body");
window.setTimeout(() => {
terms.forEach((term) => _highlightText(body, term, "highlighted"));
}, 10);
const searchBox = document.getElementById("searchbox");
if (searchBox === null) return;
searchBox.appendChild(
document
.createRange()
.createContextualFragment(
'<p class="highlight-link">' +
'<a href="javascript:SphinxHighlight.hideSearchWords()">' +
_("Hide Search Matches") +
"</a></p>"
)
);
},
/**
* helper function to hide the search marks again
*/
hideSearchWords: () => {
document
.querySelectorAll("#searchbox .highlight-link")
.forEach((el) => el.remove());
document
.querySelectorAll("span.highlighted")
.forEach((el) => el.classList.remove("highlighted"));
localStorage.removeItem("sphinx_highlight_terms")
},
initEscapeListener: () => {
// only install a listener if it is really needed
if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
document.addEventListener("keydown", (event) => {
// bail for input elements
if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
// bail with special keys
if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
SphinxHighlight.hideSearchWords();
event.preventDefault();
}
});
},
};
_ready(SphinxHighlight.highlightSearchWords);
_ready(SphinxHighlight.initEscapeListener);
| python/docs/_static/sphinx_highlight.js/0 | {
"file_path": "python/docs/_static/sphinx_highlight.js",
"repo_id": "python",
"token_count": 1859
} | 48 |
<jupyter_start><jupyter_text>Example of using elementwise activation functions in the CUTLASS Python interfaceThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run GEMMs with different epilogues.[](https://colab.research.google.com/github/NVIDIA/cutlass/tree/master/examples/00_basic_gemm.ipynb) We first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import numpy as np
import cutlass
# This controls whether the C++ GEMM declaration will be printed at each step. Set to `False` to
# omit this information.
print_module = True
m = 256
n = m
k = m
type_A = np.float16
type_B = np.float16
type_C = np.float16
type_D = np.float16
np.random.seed(1234)
scope_min = -4
scope_max = 4
tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A))
tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B))
tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C))
alpha = np.float16(1.)
beta = np.float16(0.)
tensor_D = np.zeros(tensor_C.shape).astype(type_D)<jupyter_output>/usr/local/lib/python3.8/dist-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm<jupyter_text>Run a GEMM with an identity activation functionTo begin, we simply run a default GEMM with an identity activation function. This performs the well-known operation `D = alpha * (A @ B) + beta * C`. This is the default activation function used, and does not need to be specified.<jupyter_code>plan = cutlass.op.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8
using cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor,
cutlass::half_t,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<256, 128, 64>,
cutlass::gemm::GemmShape<64, 64, 64>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, cutlass::half_t, cutlass::half_t>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
3,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
// Define named type
struct cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8_type :
public cutlass_sm80_tensorop_[...]<jupyter_text>Run a GEMM with a ReLU element-wise activation functionCUTLASS makes it easy to support other element-wise activation functions. This results in performing an element-wise operation after the generic linear combination performed in a GEMM. If we call such an activation function `act`, the resulting formulation is ```D = alpha * (A @ B) + beta * C``` followed by ```D = act(D)```. Here, we will add a ReLU activation function. Given an input `x`, ReLU returns `max(x, 0)`. This is easy to do in CUTLASS. One only needs to set the plan's `activation` field.<jupyter_code>tensor_D_relu = np.zeros(tensor_C.shape).astype(type_D)
plan.activation = cutlass.epilogue.relu
plan.run(tensor_A, tensor_B, tensor_C, tensor_D_relu, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8
using cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor,
cutlass::half_t,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<256, 128, 64>,
cutlass::gemm::GemmShape<64, 64, 64>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombinationGeneric<cutlass::epilogue::thread::ReLu, cutlass::half_t, 8, cutlass::half_t, cutlass::half_t>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
3,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
// Define named type
struct cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8[...]<jupyter_text>We can now verify the result of the GEMM that used a ReLU activation function:<jupyter_code>relu_ref = (tensor_D >= 0).astype(type_D) * tensor_D
np.testing.assert_array_equal(relu_ref, tensor_D_relu)<jupyter_output><empty_output><jupyter_text>Other element-wise activation functionsCUTLASS supports a variety of widely-used element-wise activation functions. We can obtain a list of these functions via the `activations()` method.<jupyter_code>activations = plan.activations()
for activation in activations:
print(activation)<jupyter_output><class 'cutlass.backend.epilogue.gelu'>
<class 'cutlass.backend.epilogue.hardswish'>
<class 'cutlass.backend.epilogue.identity'>
<class 'cutlass.backend.epilogue.leaky_relu'>
<class 'cutlass.backend.epilogue.relu'>
<class 'cutlass.backend.epilogue.sigmoid'>
<class 'cutlass.backend.epilogue.silu'>
<class 'cutlass.backend.epilogue.tanh'><jupyter_text>We can then run each of them:<jupyter_code>for activation in activations:
print('=============================================================================================')
print(f'Compiling and running activation {activation}')
print('=============================================================================================')
plan.activation = activation
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output>=============================================================================================
Compiling and running activation <class 'cutlass.backend.epilogue.gelu'>
=============================================================================================
// Gemm operator cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8
using cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor,
cutlass::half_t,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<256, 128, 64>,
cutlass::gemm::GemmShape<64, 64, 64>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombinationGeneric<cutlass::epilogue::thread::GELU, cutlass:[...] | python/docs/externals/01_epilogue.ipynb/0 | {
"file_path": "python/docs/externals/01_epilogue.ipynb",
"repo_id": "python",
"token_count": 2598
} | 49 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Definition of CuTe Layouts and functions to manipulate them
"""
from itertools import chain
from typing import Union
from .int_tuple import *
class LayoutBase:
pass
def is_layout(x):
return isinstance(x, LayoutBase)
class Layout(LayoutBase):
def __init__(self, _shape, _stride=None):
self.shape = _shape
if _stride is None:
self.stride = prefix_product(self.shape)
else:
self.stride = _stride
# operator ==
def __eq__(self, other):
return self.shape == other.shape and self.stride == other.stride
# operator len(L) (len [rank] like tuples)
def __len__(self):
if is_tuple(self.shape):
return len(self.shape)
else:
return 1
# operator () (map coord to idx)
def __call__(self, *args):
"""
Map a logical coordinate to a linear index (Coord has no Underscore slice operators)
OR
Slice the layout and return the sublayout (Coord has an Underscore slice op)
Follow the same behavior of `Layout::operator(Coord const&)` in cute C++
"""
if has_none(args):
if len(args) == 1:
return Layout(slice_(args[0], self.shape), slice_(args[0], self.stride))
else:
return Layout(slice_(args, self.shape), slice_(args, self.stride))
else:
if len(args) == 1:
return crd2idx(args[0], self.shape, self.stride)
else:
return crd2idx(args, self.shape, self.stride)
# operator [] (get-i like tuples)
def __getitem__(self, i):
if is_tuple(self.shape):
return Layout(self.shape[i], self.stride[i])
else:
assert i == 0
return Layout(self.shape, self.stride)
# size(layout) Size of the domain
def size(self):
return product(self.shape)
# cosize(layout) Size of the codomain
def cosize(self):
return self(self.size() - 1) + 1
# print and str
def __str__(self):
return f"{self.shape}:{self.stride}"
# error msgs and representation
def __repr__(self):
return f"Layout({self.shape},{self.stride})"
# Make Layout from a list of layouts (each layout it's own mode in the result)
def make_layout(*layouts):
if len(layouts) == 1 and not is_layout(layouts[0]):
layouts = layouts[0]
shape, stride = zip(*((a.shape,a.stride) for a in layouts))
return Layout(shape, stride)
# Size of the domain
def size(layout):
if is_layout(layout):
return layout.size()
return product(layout)
# Size of the codomain
def cosize(layout):
return layout.cosize()
# Layout coalesce -- flatten and combine as many modes as possible while preserving the int-to-int function
def coalesce(layout, profile=None):
if is_tuple(profile):
assert len(layout) >= len(profile)
return make_layout(chain((coalesce(layout[i], profile[i]) for i in range( 0,len(profile))),
(layout[i] for i in range(len(profile),len(layout)))))
result_shape = [1]
result_stride = [0]
for (shape,stride) in zip(flatten(layout.shape),flatten(layout.stride)):
# skip their shape-1s
if shape == 1:
continue
# replace our shape-1 with anything
elif result_shape[-1] == 1:
result_shape[-1] = shape
result_stride[-1] = stride
# merge modes if the shape*stride match
elif result_shape[-1] * result_stride[-1] == stride:
result_shape[-1] = result_shape[-1] * shape
# append a new mode
else:
result_shape.append(shape)
result_stride.append(stride)
if len(result_shape) == 1:
return Layout(result_shape[0], result_stride[0])
else:
return Layout(tuple(result_shape), tuple(result_stride))
# Layout filter -- replace all stride-0 modes with size-1 and then coalesce to remove them
def filter(layout, profile=None):
if is_tuple(profile):
assert len(layout) >= len(profile)
return make_layout(chain((filter(layout[i], profile[i]) for i in range( 0,len(profile))),
(layout[i] for i in range(len(profile),len(layout)))))
result_shape = []
result_stride = []
for (shape,stride) in zip(flatten(layout.shape),flatten(layout.stride)):
# skip their shape-1s and stride-0s
if not (shape == 1 or stride == 0):
result_shape.append(shape)
result_stride.append(stride)
if len(result_shape) == 0:
return Layout(1,0)
else:
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout composition
# Use tuples-of-layouts to perform this operation by-mode and None as no-op
def composition(layoutA, layoutB):
if layoutB is None:
return layoutA
elif is_int(layoutB):
return composition(layoutA, Layout(layoutB))
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
return make_layout(chain((composition(layoutA[i], layoutB[i]) for i in range( 0,len(layoutB))),
(layoutA[i] for i in range(len(layoutB),len(layoutA)))))
elif is_tuple(layoutB.shape):
return make_layout(composition(layoutA, layoutB_i) for layoutB_i in layoutB)
if layoutB.stride == 0:
return Layout(layoutB.shape, 0)
else:
result_shape = []
result_stride = []
rest_shape = layoutB.shape
rest_stride = layoutB.stride
for (s, d) in zip(flatten(layoutA.shape)[:-1], flatten(layoutA.stride)[:-1]):
s1 = shape_div(s, rest_stride)
result_shape.append(min(s1,rest_shape))
result_stride.append(rest_stride * d)
rest_shape = shape_div(rest_shape, abs(s1))
rest_stride = shape_div(rest_stride, s)
result_shape.append(rest_shape)
result_stride.append(rest_stride * flatten(layoutA.stride)[-1])
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout complement
def complement(layout, max_idx=1):
if is_int(layout):
return complement(Layout(layout))
result_shape = []
result_stride = []
current_idx = 1
sorted_DS = sorted(zip(flatten(layout.stride), flatten(layout.shape)))
for (stride, shape) in sorted_DS:
if stride == 0 or shape == 1:
continue
in_bound = current_idx <= shape * stride
# To support symbolic value which can't be evaluated now
assert (type(in_bound) is not bool) or in_bound
result_shape.append(stride // current_idx)
result_stride.append(current_idx)
current_idx = shape * stride
result_shape.append((max_idx + current_idx - 1) // current_idx) # ceil_div
result_stride.append(current_idx)
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout right inverse
def right_inverse(layout):
if layout is None:
return None
elif is_int(layout):
return Layout(layout)
result_shape = []
result_stride = []
current_idx = 1
flat_shape = flatten(layout.shape)
flat_stride = flatten(layout.stride)
sorted_DSA = sorted(zip(flat_stride, flat_shape, prefix_product(flat_shape)))
for (stride,shape,rstride) in sorted_DSA:
if shape == 1:
continue
if current_idx != stride:
break
result_shape.append(shape)
result_stride.append(rstride)
current_idx = shape * stride
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout left inverse
def left_inverse(layout):
if layout is None:
return None
elif is_int(layout):
return Layout(layout)
return right_inverse(make_layout(layout, complement(layout)))
# Split a layout by the composition of B and the "rest"
# Use tuples-of-layouts to perform this operation by-mode and None as no-op
def logical_divide(layoutA, layoutB):
if layoutB is None:
return layoutA
elif is_int(layoutB):
return logical_divide(layoutA, Layout(layoutB))
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
return make_layout(chain((logical_divide(layoutA[i], layoutB[i]) for i in range( 0,len(layoutB))),
(layoutA[i] for i in range(len(layoutB),len(layoutA)))))
return composition(layoutA, make_layout(layoutB, complement(layoutB, size(layoutA))))
# Reproduce a layoutA over a layoutB
# Use tuples-of-layouts to perform this operation by-mode and None as no-op
def logical_product(layoutA, layoutB):
if layoutB is None:
return layoutA
elif is_int(layoutB):
return logical_product(layoutA, Layout(layoutB))
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
return make_layout(chain((logical_product(layoutA[i], layoutB[i]) for i in range( 0,len(layoutB))),
(layoutA[i] for i in range(len(layoutB),len(layoutA)))))
return make_layout(layoutA, composition(complement(layoutA, size(layoutA)*cosize(layoutB)), layoutB))
# Gather the modes from a hierarchical logical_divide or logical_product
def hier_unzip(splitter, layoutA, layoutB):
if layoutB is None:
return make_layout(Layout(1,0), layoutA)
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
# A layout with shape ((A,a),(B,b),(C,c))
split = make_layout(hier_unzip(splitter, layoutA[i], layoutB[i]) for i in range(0,len(layoutB)))
# Gather to shape ((A,B,C,...),(a,b,c,...,y,z))
return make_layout(make_layout( split[i][0] for i in range( 0,len(layoutB))),
make_layout(chain((split[i][1] for i in range( 0,len(layoutB))),
(layoutA[i] for i in range(len(layoutB),len(layoutA))))))
# splitter must return a rank-2 layout
return splitter(layoutA, layoutB)
# Apply logical divide hierarchically and gather the split modes into two modes
def zipped_divide(layoutA, layoutB):
return hier_unzip(logical_divide, layoutA, layoutB)
# Perform logical divide hierarchically and gather tiles (B-layouts) into a new mode
def tiled_divide(layoutA, layoutB):
result = zipped_divide(layoutA, layoutB)
return make_layout([result[0]] + [result[1][i] for i in range(len(result[1]))])
# Apply logical product hierarchically and gather the split modes into two modes
def zipped_product(layoutA, layoutB):
return hier_unzip(logical_product, layoutA, layoutB)
# Perform logical product hierarchically and gather tiles (B-layouts) into a new mode
def tiled_product(layoutA, layoutB):
result = zipped_product(layoutA, layoutB)
return make_layout([result[0]] + [result[1][i] for i in range(len(result[1]))])
def slice_and_offset(crd: tuple,
layout: Layout):
return (Layout(slice_(crd, layout.shape), slice_(crd, layout.stride)),
crd2idx(crd, layout.shape, layout.stride))
| python/pycute/layout.py/0 | {
"file_path": "python/pycute/layout.py",
"repo_id": "python",
"token_count": 4643
} | 50 |
################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Unit tests for EVT DAGs that mix node types, covering SM80 through SM90
"""
import logging
import unittest
import cutlass
from cutlass.backend import *
from cutlass.epilogue import *
from cutlass.swizzle import ThreadblockSwizzleStreamK
from utils.evt_testbed import EVTTestBed, EVTTestCaseBase
cutlass.set_log_level(logging.WARNING)
@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTMixed(EVTTestCaseBase):
def test_mixed_dag(self):
def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
if device_cc() == 80:
alignments = [2, 4, 8]
else:
# Sm90 EVT currently only supports 128-bit alignment
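            # (e.g. 8 half-precision elements span 8 * 16 = 128 bits)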
alignments = [8,]
for align in alignments:
for m, n, k, l in self.get_problem_sizes(align):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (l, m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
launcher = EVTTestBed(self.element, evt_mixed_dag, example_inputs)
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
@unittest.skipIf(device_cc() not in [80, 89], "This unittest is for cc 80 and 89 only")
def test_mixed_dag_float(self):
def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
for align in [3, 2, 4]:
for m, n, k, l in self.get_problem_sizes(align):
example_inputs = {
"accum": self.fake_tensor(np.float32, (l, m, n)),
"alpha": 1.0,
"C": self.fake_tensor(np.float32, (l, m, n)),
"beta": 1.0,
"aux": self.fake_tensor(np.float32, (l, m, n)),
"cbias": self.fake_tensor(np.float32, (m, 1)),
"rbias": self.fake_tensor(np.float32, (n,)),
"D": self.fake_tensor(np.float32, (l, m, n)),
"F": self.fake_tensor(np.float32, (l, m, n)),
"F_row_max": self.fake_tensor(np.float32, (n,)),
"E_col_max": self.fake_tensor(np.float32, (m, 1))
}
launcher = EVTTestBed(DataType.f32, evt_mixed_dag, example_inputs)
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
@unittest.skipIf(device_cc() not in [80, 89], "This unittest is for cc 80 and 89 only")
def test_mixed_dag_stage2(self):
def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (l, m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
launcher = EVTTestBed(self.element, evt_mixed_dag, example_inputs, epilogue_stages=2)
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
@unittest.skipIf(device_cc() not in [80, 89], "This unittest is for cc 80 and 89 only")
def test_mixed_dag_partition_k(self):
def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (l, m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
tile_description = {
"threadblock_shape": [128, 128, 64],
"warp_count": [2, 2, 2]
}
launcher = EVTTestBed(self.element, evt_mixed_dag, example_inputs, tile_description=tile_description, epilogue_stages=2)
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
@unittest.skipIf(device_cc() not in [80, 89], "This unittest is for cc 80 and 89 only")
def test_mixed_dag_stream_k(self):
def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
# High per-sm occupancy tile_description
tile_description = {
"threadblock_shape": [128, 128, 32],
"warp_count": [2, 2, 1],
"stages": 3
}
tds = [None, tile_description]
for td in tds:
for m, n, k, l in self.get_problem_sizes(8, k=960, batch_count=[1, 3]):
if l == 1:
example_inputs = {
"accum": self.fake_tensor(self.element, (m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (m, n)),
"F": self.fake_tensor(self.element, (m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
else:
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (l, m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
if td is not None:
launcher = EVTTestBed(
self.element, evt_mixed_dag, example_inputs,
tile_description=td,
swizzling_functor=ThreadblockSwizzleStreamK, backend="torch")
else:
launcher = EVTTestBed(
self.element, evt_mixed_dag, example_inputs,
swizzling_functor=ThreadblockSwizzleStreamK, backend="torch")
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_mixed_dag_no_batch(self):
def evt_mixed_dag_no_batch(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
for m, n, k, _ in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (m, n)),
"F": self.fake_tensor(self.element, (m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
launcher = EVTTestBed(self.element, evt_mixed_dag_no_batch, example_inputs)
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, 1)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/evt/evt_mixed_sm80_90.py/0 | {
"file_path": "test/python/cutlass/evt/evt_mixed_sm80_90.py",
"repo_id": "test",
"token_count": 7012
} | 51 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from cutlass_library import SubstituteTemplate
import cutlass
from cutlass_library import (
DataTypeNames,
EpilogueScheduleSuffixes,
KernelScheduleSuffixes,
LayoutType,
OpcodeClassNames,
ShortDataTypeNames,
ShortLayoutTypeNames
)
from cutlass.backend import library
from gemm_testbed import test_all_gemm
class Layout:
"""
Utility class to map transpose and non-transpose terminology to row- and column-major terminology
"""
T = LayoutType.RowMajor
N = LayoutType.ColumnMajor
class LayoutCombination:
"""
    Utility class defining all combinations of row- and column-major layouts for operands to a GEMM
"""
NNN = (Layout.N, Layout.N, Layout.N)
NNT = (Layout.N, Layout.N, Layout.T)
NTN = (Layout.N, Layout.T, Layout.N)
NTT = (Layout.N, Layout.T, Layout.T)
TNN = (Layout.T, Layout.N, Layout.N)
TNT = (Layout.T, Layout.N, Layout.T)
TTN = (Layout.T, Layout.T, Layout.N)
TTT = (Layout.T, Layout.T, Layout.T)
def get_name(
layouts,
alignments,
element_output,
element_accumulator,
element_epilogue,
cluster_shape,
threadblock_shape,
stages,
element_a,
element_b,
element_c,
arch,
opclass,
kernel_schedule=None,
epilogue_schedule=None,
suffix="",
):
"""
Generates a procedural name for a test case.
:param layouts: indexable container of layouts of A, B, and C operands
:param alignments: indexable container of alignments of A, B, and C operands
:param element_output: data type of the output element
:param element_accumulator: data type used in accumulation
:param element_epilogue: data type used in computing the epilogue
:param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param element_a: data type of operand A
:param element_b: data type of operand B
:param element_c: data type of operand C
:param arch: compute capability of kernel being generated
:type arch: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass.OpcodeClass
:param kernel_schedule: kernel_schedule type
:type kernel_schedule: cutlass.KernelScheduleType
:param epilogue_schedule: epilogue_schedule type
:type epilogue_schedule: cutlass.EpilogueScheduleType
:param suffix: additional string to add to the suffix of the name
:type suffix: str
:return: str
"""
name_format = "test_SM${arch}_Device_Gemm_${eA}${lA}_${eB}${lB}_${eC}${lC}_${opclass}_${acc}_${tbM}x${tbN}x${tbK}_${cM}x${cN}x${cK}_${stages}_align${aA}-${aB}-${aC}${k}${e}${suffix}"
return SubstituteTemplate(
name_format,
{
"arch": str(arch),
"eA": DataTypeNames[element_a],
"eB": DataTypeNames[element_b],
"eC": DataTypeNames[element_c],
"lA": ShortLayoutTypeNames[layouts[0]],
"lB": ShortLayoutTypeNames[layouts[1]],
"lC": ShortLayoutTypeNames[layouts[2]],
"opclass": OpcodeClassNames[opclass],
"acc": DataTypeNames[element_accumulator],
"cM": str(cluster_shape[0]),
"cN": str(cluster_shape[1]),
"cK": str(cluster_shape[2]),
"tbM": str(threadblock_shape[0]),
"tbN": str(threadblock_shape[1]),
"tbK": str(threadblock_shape[2]),
"stages": str(stages) if stages is not None else "auto",
"aA": str(alignments[0]),
"aB": str(alignments[1]),
"aC": str(alignments[2]),
"k": "" if kernel_schedule is None else KernelScheduleSuffixes[kernel_schedule],
"e": "" if epilogue_schedule is None else EpilogueScheduleSuffixes[epilogue_schedule],
"suffix": "" if suffix is None else suffix,
},
)
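# Illustrative result (hypothetical values): for an f16 TTN tensor-op kernel on SM90 with
# f32 accumulation, a 128x128x64 threadblock, a 1x1x1 cluster, automatic stage count, and
# alignment 8 for every operand, the generated name is roughly
#   "test_SM90_Device_Gemm_f16t_f16t_f16n_tensorop_f32_128x128x64_1x1x1_auto_align8-8-8"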
def add_test_gemm(
cls=None,
cc=None,
element=None,
layouts=None,
alignments=None,
element_output=None,
element_accumulator=None,
cluster_shape=None,
threadblock_shape=None,
warp_count=None,
stages=None,
opclass=None,
swizzle=None,
kernel_schedule=None,
epilogue_schedule=None,
compilation_modes=['nvcc', 'nvrtc'],
element_A=None,
element_B=None,
element_C=None):
"""
    Create test-running functions with the given specification and set them as methods of ``cls``.
:param cls: class to which the generated method will be added
:type cls: type
:param cc: compute capability to compile for
:type cc: int
:param element: data type of A and B operands
    :type element: cutlass.DataType
:param layouts: layouts of A, B, and C operands
:type layouts: list or tuple
    :param alignments: alignments of A, B, and C operands
:type alignments: list or tuple
:param element_output: data type of the output element
:type element_output: cutlass.DataType
:param element_accumulator: data type used in accumulation
:type element_accumulator: cutlass.DataType
:param cluster_shape: dimensions of clusters
:type cluster_shape: list or tuple
:param threadblock_shape: dimensions of threadblock tiles
:type threadblock_shape: list or tuple
:param warp_count: warps to be launched per threadblock dimension
:type warp_count: list or tuple
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass.OpcodeClass
:param swizzle: threadblock swizzling functor
:param kernel_schedule: kernel schedule to use
:type kernel_schedule: cutlass.KernelScheduleType
:param epilogue_schedule: epilogue schedule to use
:type epilogue_schedule: cutlass.EpilogueScheduleType
:param compilation_modes: list of compilers to used in testing the kernel (options: 'nvrtc', 'nvcc')
    :type compilation_modes: list
:param element_A: data type of operand A. If set, overrides ``element``
:type element_A: cutlass.DataType
:param element_B: data type of operand B. If set, overrides ``element``
:type element_B: cutlass.DataType
:param element_C: data type of operand C. If set, overrides ``element``
:type element_C: cutlass.DataType
"""
if element_A is None:
element_A = element
if element_B is None:
element_B = element
if element_C is None:
element_C = element
if element_output is None:
element_output = element
if element_accumulator is None:
element_accumulator = element
for compilation_mode in compilation_modes:
def run(self):
"""
Dynamically-generated function that constructs a GEMM operation and verifies it against
multiple test cases.
"""
layout_A, layout_B, layout_C = layouts
alignment_A, alignment_B, alignment_C = alignments
plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B,
element_C=element_C, element_D=element_output,
layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
element_accumulator=element_accumulator,
kernel_cc=cc)
plan.opclass = opclass
if swizzle is not None:
plan.swizzling_functor = swizzle
td = plan.tile_descriptions()[0]
if warp_count is not None:
td.warp_count = warp_count
td.threadblock_shape = threadblock_shape
td.stages = stages
td.cluster_shape = cluster_shape
op = plan.construct(tile_description=td, alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)
self.assertTrue(test_all_gemm(op, 'universal', compilation_mode=compilation_mode))
element_epilogue = element_accumulator
name = get_name(
layouts=layouts, alignments=alignments, element_output=element_output, element_accumulator=element_accumulator,
element_epilogue=element_epilogue, cluster_shape=cluster_shape, threadblock_shape=threadblock_shape,
stages=stages, element_a=element_A, element_b=element_B, element_c=element_C, arch=cc, opclass=opclass,
kernel_schedule=kernel_schedule, epilogue_schedule=epilogue_schedule, suffix=f'_{compilation_mode}')
setattr(cls, name, run)
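# Illustrative use from a test module (hypothetical class and tile sizes):
#
#   class GemmF16Sm80(unittest.TestCase):
#       pass
#
#   add_test_gemm(cls=GemmF16Sm80, cc=80, element=cutlass.DataType.f16,
#                 layouts=LayoutCombination.TTN, alignments=[8, 8, 8],
#                 element_output=cutlass.DataType.f16,
#                 element_accumulator=cutlass.DataType.f32,
#                 cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 64],
#                 warp_count=[2, 2, 1], stages=3,
#                 opclass=cutlass.OpcodeClass.TensorOp)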
| test/python/cutlass/gemm/utils.py/0 | {
"file_path": "test/python/cutlass/gemm/utils.py",
"repo_id": "test",
"token_count": 4014
} | 52 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit tests for the launch_kernel_on_cluster function
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/cluster_launch.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include <cassert>
#include <memory>
#include <type_traits>
#if defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED)
namespace { // (anonymous)
// Using a struct instead of a lambda makes it possible
// to name the deleter type without std::function
// (which type-erases).
struct scalar_deleter {
void operator() (float* p) {
if (p != nullptr) {
cudaFree(p);
}
}
};
using scalar_device_pointer = std::unique_ptr<float, scalar_deleter>;
// Each test needs to initialize this anew,
// from a scalar instance that is in scope during the test.
__device__ float* scalar_ptr_gpu;
// A single scalar value on device.
// The constructor allocates space on device for one value,
// copies the value to device, and sets the global pointer
// `scalar_ptr_gpu` (see above) to point to it.
// sync_to_host() copies that value back to host.
//
// This class exists only for the tests in this file.
// In order to know whether a kernel that launch_kernel_on_cluster
// claimed to launch actually ran, each kernel
// performs a side effect: it modifies the scalar value
// through the scalar_ptr_gpu pointer.
// It performs a side effect through a global,
// rather than through an argument,
// so that we can test kernel launch
// with kernels that take zero parameters.
class scalar {
private:
static constexpr std::size_t num_bytes = sizeof(float);
public:
scalar(float value) : value_host_(value)
{
float* ptr_gpu_raw = nullptr;
auto err = cudaMalloc(&ptr_gpu_raw, num_bytes);
assert(err == cudaSuccess);
scalar_device_pointer ptr_gpu{ptr_gpu_raw, scalar_deleter{}};
err = cudaMemcpy(ptr_gpu.get(), &value_host_,
num_bytes, cudaMemcpyHostToDevice);
assert(err == cudaSuccess);
ptr_gpu_ = std::move(ptr_gpu);
upload_device_pointer();
}
float sync_to_host()
{
auto err = cudaMemcpy(&value_host_, ptr_gpu_.get(),
num_bytes, cudaMemcpyDeviceToHost);
assert(err == cudaSuccess);
return value_host_;
}
private:
void upload_device_pointer()
{
float* ptr_raw = ptr_gpu_.get();
auto err = cudaMemcpyToSymbol(scalar_ptr_gpu, &ptr_raw, sizeof(float*));
assert(err == cudaSuccess);
}
float value_host_ = 0.0;
scalar_device_pointer ptr_gpu_;
};
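// Typical use in the tests below (sketch):
//
//   scalar global_value(-1.0f);   // copies -1.0f to device and sets scalar_ptr_gpu
//   // ... launch a kernel that writes through scalar_ptr_gpu ...
//   EXPECT_EQ(global_value.sync_to_host(), expected_value);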
template<int cluster_x, int cluster_y, int cluster_z>
CUTE_DEVICE void check_cluster_shape() {
[[maybe_unused]] const dim3 cluster_shape = cute::cluster_shape();
assert(cluster_shape.x == cluster_x);
assert(cluster_shape.y == cluster_y);
assert(cluster_shape.z == cluster_z);
}
template<int cluster_x, int cluster_y, int cluster_z>
__global__ void kernel_0()
{
check_cluster_shape<cluster_x, cluster_y, cluster_z>();
// Write to global memory, so that we know
// whether the kernel actually ran.
const dim3 block_id = cute::block_id_in_cluster();
if (threadIdx.x == 0 && block_id.x == 0 && block_id.y == 0 && block_id.z == 0) {
*scalar_ptr_gpu = 0.1f;
}
}
template<int cluster_x, int cluster_y, int cluster_z,
int expected_p0>
__global__ void kernel_1(int p0)
{
check_cluster_shape<cluster_x, cluster_y, cluster_z>();
assert(p0 == expected_p0);
// Write to global memory, so that we know
// whether the kernel actually ran.
const dim3 block_id = cute::block_id_in_cluster();
if (threadIdx.x == 0 && block_id.x == 0 && block_id.y == 0 && block_id.z == 0) {
*scalar_ptr_gpu = 1.2f;
}
}
template<int cluster_x, int cluster_y, int cluster_z,
int expected_p0,
int expected_p2>
__global__ void kernel_2(int p0, void* p1, int p2)
{
check_cluster_shape<cluster_x, cluster_y, cluster_z>();
assert(p0 == expected_p0);
assert(p1 == nullptr);
assert(p2 == expected_p2);
// Write to global memory, so that we know
// whether the kernel actually ran.
const dim3 block_id = cute::block_id_in_cluster();
if (threadIdx.x == 0 && block_id.x == 0 && block_id.y == 0 && block_id.z == 0) {
*scalar_ptr_gpu = 2.3f;
}
}
struct OverloadedOperatorAmpersand {
struct tag_t {};
// Test that kernel launch uses the actual address,
// instead of any overloaded operator& that might exist.
CUTE_HOST_DEVICE tag_t operator& () const {
return {};
}
int x = 0;
int y = 0;
int z = 0;
int w = 0;
};
static_assert(sizeof(OverloadedOperatorAmpersand) == 4 * sizeof(int));
template<int cluster_x, int cluster_y, int cluster_z,
int expected_p0,
int expected_p1_x,
int expected_p1_y,
int expected_p1_z,
int expected_p1_w,
std::uint64_t expected_p2>
__global__ void kernel_3(int p0, OverloadedOperatorAmpersand p1, std::uint64_t p2)
{
check_cluster_shape<cluster_x, cluster_y, cluster_z>();
assert(p0 == expected_p0);
assert(p1.x == expected_p1_x);
assert(p1.y == expected_p1_y);
assert(p1.z == expected_p1_z);
assert(p1.w == expected_p1_w);
assert(p2 == expected_p2);
// Write to global memory, so that we know
// whether the kernel actually ran.
const dim3 block_id = cute::block_id_in_cluster();
if (threadIdx.x == 0 && block_id.x == 0 && block_id.y == 0 && block_id.z == 0) {
*scalar_ptr_gpu = 3.4f;
}
}
} // namespace (anonymous)
TEST(SM90_ClusterLaunch, Kernel_0)
{
scalar global_value(-1.0f);
const dim3 grid_dims{2, 1, 1};
const dim3 block_dims{1, 1, 1};
const dim3 cluster_dims{grid_dims.x * block_dims.x, 1, 1};
const int smem_size_in_bytes = 0;
cutlass::ClusterLaunchParams params{
grid_dims, block_dims, cluster_dims, smem_size_in_bytes};
void const* kernel_ptr = reinterpret_cast<void const*>(&kernel_0<2, 1, 1>);
cutlass::Status status = cutlass::launch_kernel_on_cluster(params,
kernel_ptr);
ASSERT_EQ(status, cutlass::Status::kSuccess);
cudaError_t result = cudaDeviceSynchronize();
if (result == cudaSuccess) {
CUTLASS_TRACE_HOST("Kernel launch succeeded\n");
}
else {
CUTLASS_TRACE_HOST("Kernel launch FAILED\n");
cudaError_t error = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << "Error at kernel sync: "
<< cudaGetErrorString(error) << "\n";
}
ASSERT_EQ(global_value.sync_to_host(), 0.1f);
}
TEST(SM90_ClusterLaunch, Kernel_1)
{
scalar global_value(-1.0f);
const dim3 grid_dims{2, 1, 1};
const dim3 block_dims{1, 1, 1};
const dim3 cluster_dims{grid_dims.x * block_dims.x, 1, 1};
const int smem_size_in_bytes = 0;
cutlass::ClusterLaunchParams params{
grid_dims, block_dims, cluster_dims, smem_size_in_bytes};
constexpr int expected_p0 = 42;
void const* kernel_ptr = reinterpret_cast<void const*>(&kernel_1<2, 1, 1, expected_p0>);
const int p0 = expected_p0;
cutlass::Status status = cutlass::launch_kernel_on_cluster(params,
kernel_ptr, p0);
ASSERT_EQ(status, cutlass::Status::kSuccess);
cudaError_t result = cudaDeviceSynchronize();
if (result == cudaSuccess) {
#if (CUTLASS_DEBUG_TRACE_LEVEL > 1)
CUTLASS_TRACE_HOST("Kernel launch succeeded\n");
#endif
}
else {
CUTLASS_TRACE_HOST("Kernel launch FAILED\n");
cudaError_t error = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << "Error at kernel sync: "
<< cudaGetErrorString(error) << "\n";
}
ASSERT_EQ(global_value.sync_to_host(), 1.2f);
}
TEST(SM90_ClusterLaunch, Kernel_2)
{
scalar global_value(-1.0f);
const dim3 grid_dims{2, 1, 1};
const dim3 block_dims{1, 1, 1};
const dim3 cluster_dims{grid_dims.x * block_dims.x, 1, 1};
const int smem_size_in_bytes = 0;
cutlass::ClusterLaunchParams params{
grid_dims, block_dims, cluster_dims, smem_size_in_bytes};
constexpr int expected_p0 = 42;
constexpr int expected_p2 = 43;
int p0 = expected_p0;
int* p1 = nullptr;
int p2 = expected_p2;
void const* kernel_ptr = reinterpret_cast<void const*>(
&kernel_2<2, 1, 1, expected_p0, expected_p2>);
cutlass::Status status = cutlass::launch_kernel_on_cluster(params,
kernel_ptr, p0, p1, p2);
ASSERT_EQ(status, cutlass::Status::kSuccess);
cudaError_t result = cudaDeviceSynchronize();
if (result == cudaSuccess) {
#if (CUTLASS_DEBUG_TRACE_LEVEL > 1)
CUTLASS_TRACE_HOST("Kernel launch succeeded\n");
#endif
}
else {
CUTLASS_TRACE_HOST("Kernel launch FAILED\n");
cudaError_t error = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << "Error at kernel sync: "
<< cudaGetErrorString(error) << "\n";
}
ASSERT_EQ(global_value.sync_to_host(), 2.3f);
}
TEST(SM90_ClusterLaunch, Kernel_3)
{
scalar global_value(-1.0f);
const dim3 grid_dims{2, 1, 1};
const dim3 block_dims{1, 1, 1};
const dim3 cluster_dims{grid_dims.x * block_dims.x, 1, 1};
const int smem_size_in_bytes = 0;
cutlass::ClusterLaunchParams params{
grid_dims, block_dims, cluster_dims, smem_size_in_bytes};
constexpr int expected_p0 = 42;
constexpr int expected_p1_x = 1;
constexpr int expected_p1_y = 2;
constexpr int expected_p1_z = 3;
constexpr int expected_p1_w = 4;
constexpr std::uint64_t expected_p2 = 1'000'000'000'000uLL;
int p0 = expected_p0;
OverloadedOperatorAmpersand p1{expected_p1_x,
expected_p1_y, expected_p1_z, expected_p1_w};
// Verify that operator& is overloaded for this type.
static_assert(! std::is_same_v<decltype(&p1),
OverloadedOperatorAmpersand*>);
std::uint64_t p2 = expected_p2;
void const* kernel_ptr = reinterpret_cast<void const*>(
&kernel_3<2, 1, 1, expected_p0, expected_p1_x,
expected_p1_y, expected_p1_z, expected_p1_w,
expected_p2>);
cutlass::Status status = cutlass::launch_kernel_on_cluster(params,
kernel_ptr, p0, p1, p2);
ASSERT_EQ(status, cutlass::Status::kSuccess);
cudaError_t result = cudaDeviceSynchronize();
if (result == cudaSuccess) {
#if (CUTLASS_DEBUG_TRACE_LEVEL > 1)
CUTLASS_TRACE_HOST("Kernel launch succeeded\n");
#endif
}
else {
CUTLASS_TRACE_HOST("Kernel launch FAILED\n");
cudaError_t error = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << "Error at kernel sync: "
<< cudaGetErrorString(error) << "\n";
}
ASSERT_EQ(global_value.sync_to_host(), 3.4f);
}
#endif // CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED
| test/unit/cluster_launch/cluster_launch.cu/0 | {
"file_path": "test/unit/cluster_launch/cluster_launch.cu",
"repo_id": "test",
"token_count": 4594
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cute/tensor.hpp>
template <class Layout, class CoTarget>
void
test_complement(Layout const& layout, CoTarget const& cotarget)
{
using namespace cute;
auto result = complement(layout, cotarget);
CUTLASS_TRACE_HOST("complement(" << layout << ", " << cotarget << ") => " << result);
auto completed = make_layout(layout, result);
// Lower-bound on the codomain size of the layout ++ complement (1)
EXPECT_GE(cosize(completed), size(cotarget));
// Upper-bound on the codomain size of the complement (2)
EXPECT_LE(cosize(result), cute::round_up(size(cotarget), cosize(layout)));
// Post-condition on the codomain of the complement
for (int i = 1; i < size(result); ++i) {
EXPECT_LT(result(i-1), result(i)); // Ordered (3)
for (int j = 0; j < size(layout); ++j) {
EXPECT_NE(result(i), layout(j)); // Disjoint (4)
}
}
// Other observations
EXPECT_LE(size(result), cosize(result)); // As a result of the ordered condition (3)
EXPECT_GE(size(result), size(cotarget) / size(filter(layout)));
EXPECT_LE(cosize(completed), cosize(result) + cosize(layout));
EXPECT_GE(cosize(result), size(cotarget) / size(filter(layout)));
if constexpr (is_static<decltype(stride(completed))>::value) { // If we can apply complement again
EXPECT_EQ(size(complement(completed)), 1); // There's no more codomain left over
}
}
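// Worked instance (illustrative): complement(Layout<_4,_2>{}, Int<16>{}) yields
// (_2,_2):(_1,_8); appended to 4:2 it covers [0, 16) exactly once, which is what
// conditions (1)-(4) above verify.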
template <class Layout>
void
test_complement(Layout const& layout)
{
return test_complement(layout, cosize(layout));
}
TEST(CuTe_core, Complement)
{
using namespace cute;
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("COMPLEMENT");
CUTLASS_TRACE_HOST("-------------------------------");
{
auto layout = Layout<_1,_0>{};
test_complement(layout);
test_complement(layout, Int<2>{});
test_complement(layout, Int<5>{});
test_complement(layout, make_shape(Int<2>{}, 2));
}
{
auto layout = Layout<_1,_1>{};
test_complement(layout);
test_complement(layout, Int<2>{});
test_complement(layout, Int<5>{});
test_complement(layout, make_shape(Int<2>{}, 2));
}
{
auto layout = Layout<_1,_2>{};
test_complement(layout, Int<1>{});
test_complement(layout, Int<2>{});
test_complement(layout, Int<8>{});
test_complement(layout, Int<5>{});
test_complement(layout, make_shape(Int<2>{}, 2));
}
{
auto layout = Layout<_4,_0>{};
test_complement(layout, Int<1>{});
test_complement(layout, Int<2>{});
test_complement(layout, Int<8>{});
}
{
auto layout = Layout<_4,_1>{};
test_complement(layout, Int<1>{});
test_complement(layout, Int<2>{});
test_complement(layout, Int<8>{});
}
{
auto layout = Layout<_4,_2>{};
test_complement(layout, Int<1>{});
test_complement(layout);
test_complement(layout, Int<16>{});
test_complement(layout, Int<19>{});
test_complement(layout, make_shape(Int<2>{}, 2));
}
{
auto layout = Layout<_4,_4>{};
test_complement(layout, Int<1>{});
test_complement(layout);
test_complement(layout, Int<17>{});
test_complement(layout, make_shape(Int<2>{}, 2));
}
{
auto layout = Layout<Shape<_2,_4>>{};
test_complement(layout);
}
{
auto layout = Layout<Shape<_2,_3>>{};
test_complement(layout);
}
{
auto layout = Layout<Shape<_2,_4>, Stride<_1,_4>>{};
test_complement(layout);
}
{
auto layout = Layout<Shape<_2,_4>, Stride<_1,_6>>{};
test_complement(layout);
}
{
auto layout = Layout<Shape<_2,_4,_8>, Stride<_8,_1,_64>>{};
test_complement(layout);
}
{
auto layout = Layout<Shape<_2,_4,_8>, Stride<_8,_1,_0>>{};
test_complement(layout);
test_complement(layout, Int<460>{});
}
{
auto layout = make_layout(Shape <Shape <_2,_2>,Shape <_2, _2>>{},
Stride<Stride<_1,_4>,Stride<_8,_32>>{});
test_complement(layout);
}
{
auto layout = make_layout(Shape <Shape <_2, _2>,Shape <_2,_2>>{},
Stride<Stride<_1,_32>,Stride<_8,_4>>{});
test_complement(layout);
}
// Fails due to non-injective layout
// {
// auto layout = make_layout(Shape <Shape <_2,_2>,Shape <_2,_2>>{},
// Stride<Stride<_1,_8>,Stride<_8,_4>>{});
// test_complement(layout);
// }
// Fails due to non-injective layout
// {
// auto layout = Layout<Shape<_2,_2>, Stride<_2,_3>>{};
// test_complement(layout);
// test_complement(layout, Int<19>{});
// }
{
auto layout = Layout<Shape<_4,_6>, Stride<_1,_6>>{};
test_complement(layout);
}
{
auto layout = Layout<Shape<_4,_2>, Stride<_1,_10>>{};
test_complement(layout);
}
{
auto layout = Layout<Shape<_4,_2>, Stride<_1,_16>>{};
test_complement(layout);
}
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("Dynamic shapes/strides");
CUTLASS_TRACE_HOST("-------------------------------");
{
auto layout = make_layout(12);
test_complement(layout, 1);
test_complement(layout);
test_complement(layout, 53);
test_complement(layout, 128);
}
{
auto layout = make_layout(12, 1);
test_complement(layout, 1);
test_complement(layout);
test_complement(layout, 53);
test_complement(layout, 128);
}
{
auto layout = make_layout(12, Int<2>{});
test_complement(layout, 1);
test_complement(layout);
test_complement(layout, 53);
test_complement(layout, 128);
}
{
auto layout = make_layout(12, 2);
test_complement(layout, 1);
test_complement(layout);
test_complement(layout, 53);
test_complement(layout, 128);
}
{
auto layout = make_layout(make_shape(3,6),make_stride(_1{}, _3{}));
test_complement(layout);
}
{
auto layout = make_layout(make_shape(3,6),make_stride(_1{}, _9{}));
test_complement(layout);
}
{
auto layout = make_layout(make_shape(3,6),make_stride(_1{}, _10{}));
test_complement(layout);
}
{
auto layout = make_layout(make_shape(make_shape(2,2), make_shape(2,2)),
Stride<Stride<_1,_4>,Stride<_8,_32>>{});
test_complement(layout);
}
{
auto layout = make_layout(Int<64>{});
test_complement(layout, make_shape(Int<32>{}, Int<4>{}, Int<4>{}));
test_complement(layout, make_shape(Int<32>{}, Int<4>{}, 4));
}
}
| test/unit/cute/core/complement.cpp/0 | {
"file_path": "test/unit/cute/core/complement.cpp",
"repo_id": "test",
"token_count": 3069
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for epilogues
*/
#pragma once
#include <fstream>
#include <cfenv>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/complex.h"
#include "cutlass/quaternion.h"
#include "cutlass/platform/platform.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace kernel {
template <typename Epilogue>
__global__ void epilogue_threadblock(
typename Epilogue::OutputTileIterator::Params params_D,
typename Epilogue::OutputTileIterator::Element *ptr_D,
typename Epilogue::OutputTileIterator::Params params_C,
typename Epilogue::OutputTileIterator::Element *ptr_C,
typename Epilogue::OutputOp::Params params_output_op,
cutlass::MatrixCoord problem_size,
cutlass::TensorRef<
typename Epilogue::WarpMmaOperator::ElementC,
typename Epilogue::WarpMmaOperator::LayoutC> accumulator_ref,
int epilogue_count = 1) {
__shared__ typename Epilogue::SharedStorage shared_storage;
int thread_idx = threadIdx.x;
int warp_idx = threadIdx.x / 32;
int lane_idx = threadIdx.x % 32;
//
// Construct the epilogue
//
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D(
params_D,
ptr_D,
problem_size,
thread_idx
);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_C(
params_C,
ptr_C,
problem_size,
thread_idx
);
// Epilogue operator
Epilogue epilogue(
shared_storage,
thread_idx,
warp_idx,
lane_idx);
//
// Initialize the accumulators
//
int warp_mn = warp_idx % (Epilogue::WarpCount::kM * Epilogue::WarpCount::kN);
int warp_m = warp_mn % Epilogue::WarpCount::kM;
int warp_n = warp_mn / Epilogue::WarpCount::kM;
accumulator_ref.add_coord_offset({
warp_m * Epilogue::WarpMmaOperator::Shape::kM,
warp_n * Epilogue::WarpMmaOperator::Shape::kN});
typename Epilogue::WarpMmaOperator::IteratorC accumulator_iterator(accumulator_ref, lane_idx);
typename Epilogue::AccumulatorTile accumulators;
accumulators.clear();
accumulator_iterator.load(accumulators);
#if 0
// For debugging, enable this block of code to fill each accumulator element with its
// source thread ID.
CUTLASS_PRAGMA_UNROLL
for (size_t i = 0; i < accumulators.size(); ++i) {
typename Epilogue::WarpMmaOperator::ElementC x(threadIdx.x);
accumulators[i] = x;
}
__syncthreads();
#endif
//
// Perform the epilogue operation
//
typename Epilogue::OutputOp output_op(params_output_op);
// Place the epilogue in a loop
for (int iter = 0; iter < epilogue_count; ++iter) {
epilogue(output_op, iterator_D, accumulators, iterator_C);
}
}
} // namespace kernel
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Epilogue_
>
class EpilogueTestbed {
public:
using Epilogue = Epilogue_;
using ElementAccumulator = typename Epilogue::ElementAccumulator;
using ElementCompute = typename Epilogue::OutputOp::ElementCompute;
using ElementOutput = typename Epilogue::ElementOutput;
using OutputOpParams = typename Epilogue::OutputOp::Params;
public:
//
// Data members
//
cutlass::MatrixCoord quantized_size;
cutlass::HostTensor<ElementAccumulator, cutlass::layout::RowMajor> accumulator_tensor;
cutlass::HostTensor<ElementOutput, cutlass::layout::RowMajor> source_tensor;
cutlass::HostTensor<ElementOutput, cutlass::layout::RowMajor> output_tensor;
public:
//
// Methods
//
EpilogueTestbed():
quantized_size(Epilogue::Shape::kM, Epilogue::Shape::kN),
accumulator_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
source_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
output_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}) {
//
// Initialize problem space
//
uint64_t seed = 2019;
cutlass::reference::host::TensorFillRandomUniform(
accumulator_tensor.host_view(),
seed,
2,
-2,
0);
cutlass::reference::host::TensorFillRandomUniform(
source_tensor.host_view(),
seed + 2018,
2,
-2,
0);
}
bool run_all() {
double alpha_values[] = {1, 0, 2.25};
double beta_values[] = {0, 1, -1.25};
    // Test runtime would explode if we tried to test every case exhaustively. This tests the full
// output tile and several smaller sizes to stress predication.
for (int m_idx = 0; m_idx < 3; ++m_idx) {
for (int n_idx = 0; n_idx < 3; ++n_idx) {
int m = quantized_size.row() - m_idx * 3;
int n = quantized_size.column() - n_idx * Epilogue::kElementsPerAccess;
for (double const &alpha : alpha_values) {
for (double const &beta : beta_values) {
bool passed = run({m, n}, {cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta)});
if (!passed) {
return false;
}
}
}
}
}
return true;
}
/// Runs the test
bool run(
cutlass::MatrixCoord problem_size,
OutputOpParams output_params) {
//
// Initialize problem space
//
ElementOutput default_output = ElementOutput(-127);
cutlass::reference::host::TensorFill(output_tensor.host_view(), default_output);
accumulator_tensor.sync_device();
output_tensor.sync_device();
source_tensor.sync_device();
//
// Initialize epilogue parameters
//
typename Epilogue::OutputTileIterator::Params params_D(output_tensor.device_ref().layout());
typename Epilogue::OutputTileIterator::Params params_C(source_tensor.device_ref().layout());
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(Epilogue::WarpCount::kCount * 32, 1);
test::kernel::epilogue_threadblock<Epilogue><<< grid, block >>>(
params_D,
output_tensor.device_data(),
params_C,
source_tensor.device_data(),
output_params,
problem_size,
accumulator_tensor.device_view());
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Kernel error: " << cudaGetErrorString(result) << std::endl;
return false;
}
//
// Verify results
//
output_tensor.sync_host();
int errors = 0;
int const kMaxErrors = 5;
for (int r = 0; errors < kMaxErrors && r < quantized_size.row(); ++r) {
for (int c = 0; errors < kMaxErrors && c < quantized_size.column(); ++c) {
cutlass::MatrixCoord coord{r, c};
ElementOutput got = output_tensor.at(coord);
ElementOutput expected;
if (coord.row() < problem_size.row() && coord.column() < problem_size.column()) {
ElementCompute intermediate =
output_params.alpha * ElementCompute(accumulator_tensor.at(coord)) +
output_params.beta * ElementCompute(source_tensor.at(coord));
if ((cutlass::platform::is_same<ElementOutput, cutlass::int4b_t>::value
|| cutlass::platform::is_same<ElementOutput, cutlass::uint4b_t>::value
|| std::numeric_limits<ElementOutput>::is_integer)
&& !std::numeric_limits<ElementCompute>::is_integer) {
std::fesetround(FE_TONEAREST);
expected = ElementOutput(std::nearbyint(float(cutlass::real(intermediate))));
} else {
expected = ElementOutput(intermediate);
}
} else {
expected = default_output;
}
if (expected != got) {
using OutputIO = cutlass::ScalarIO<ElementOutput>;
EXPECT_TRUE(false)
<< "-------\n"
<< "Error - output element (" << coord << ") - expected: "
<< OutputIO(expected)
<< ", got: " << OutputIO(got)
<< ", accum: " << (accumulator_tensor.at(coord))
<< ", source: " << OutputIO(source_tensor.at(coord))
<< ", alpha: " << (output_params.alpha)
<< ", beta: " << (output_params.beta) << "\n";
++errors;
}
}
}
//
// Report results on error
//
if (errors) {
std::stringstream ss;
ss
<< "output_tensor_op_" << Epilogue::Shape::kM << "x" << Epilogue::Shape::kN << "_"
<< Epilogue::WarpTileIterator::WarpShape::kM << "x"
<< Epilogue::WarpTileIterator::WarpShape::kN
<< "_slice_" << Epilogue::WarpCount::kK << ".csv";
std::ofstream output_file(ss.str());
output_file << output_tensor.host_view();
}
return !errors;
}
};
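// Typical use in a unit test (sketch; the concrete Epilogue type is assumed to be
// assembled elsewhere, e.g. by one of the DefaultEpilogue* helpers):
//
//   EpilogueTestbed<Epilogue> testbed;
//   EXPECT_TRUE(testbed.run_all());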
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/threadblock/testbed.h/0 | {
"file_path": "test/unit/epilogue/threadblock/testbed.h",
"repo_id": "test",
"token_count": 4068
} | 55 |
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file generates the test/unit/gemm/device SIMT tests
outputDir = ""
################################################################################
# parameters
# Edge - for tiles, the edges represent the length of one side
# Ratio - the maximum ratio between 2 edges, limits the skinniness of tiles
# MaxEdge - maximum length of each edge
# Min/Max - minimum/maximum of the product of edge lengths
################################################################################
warpsPerThreadblockEdge = [1, 2, 4, 8, 16]
warpsPerThreadblockRatio = 2
warpsPerThreadblockMax = 16
# NOTE 1x32 and 2x16 warp tile shapes fail validation for ~10% of cases
warpShapeEdges = [8, 16, 32, 64, 128, 256]
warpShapeRatio = 4
warpShapeMax = 64*64
warpShapeMin = 8*8
threadblockEdgeMax = 256
# char, type bits/elem, max tile, L0 threadblock tiles
precisions = [
["c", "cutlass::complex<float>", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ],
["q", "cutlass::Quaternion<float>", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ],
["d", "double", 64, 64*64, [ [ 64, 64], [ 32, 32] ] ],
["h", "cutlass::half_t", 16, 128*256, [ [256, 128], [ 64, 128], [ 64, 32] ] ],
["i", "int", 32, 128*128, [ [128, 64], [ 16, 32] ] ],
["s", "float", 32, 128*128, [ [128, 256], [128, 128], [ 64, 64] ] ],
["z", "cutlass::complex<double>", 128, 64*64, [ [ 32, 64], [ 16, 32] ] ],
]
# L1 will have a single kernel for every unique shape
# L2 will have everything else
transposes = [
[False, False],
[False, True],
[True, False],
[True, True]
]
################################################################################
# warps per threadblock
################################################################################
warpsPerThreadblocks = []
for warpsPerThreadblock0 in warpsPerThreadblockEdge:
for warpsPerThreadblock1 in warpsPerThreadblockEdge:
if warpsPerThreadblock0 / warpsPerThreadblock1 <= warpsPerThreadblockRatio and warpsPerThreadblock1 / warpsPerThreadblock0 <= warpsPerThreadblockRatio and warpsPerThreadblock0 * warpsPerThreadblock1 <= warpsPerThreadblockMax:
warpsPerThreadblocks.append([warpsPerThreadblock0,
warpsPerThreadblock1])
print("WarpsPerThreadblocks",warpsPerThreadblocks)
################################################################################
# warp shapes
################################################################################
warpNumThreads = 32
warpShapes = []
for warp0 in warpShapeEdges:
for warp1 in warpShapeEdges:
if warp0 / warp1 <= warpShapeRatio and warp1 / warp0 <= warpShapeRatio and warp0*warp1 <= warpShapeMax and warp0*warp1 > warpShapeMin:
warpShapes.append([warp0, warp1])
print("WarpShapes", warpShapes)
numL0 = 0
numL1 = 0
numL2 = 0
################################################################################
# create kernels
# create a file for each precision/transpose
# each file contains many tile sizes
################################################################################
# precisions
for precision in precisions:
# get precision char
precisionChar = precision[0]
precisionType = precision[1]
precisionBits = precision[2]
threadblockMaxElements = precision[3]
threadblockTilesL0 = precision[4]
# transposes
for transpose in transposes:
# get transpose char
columnMajorA = transpose[0]
columnMajorB = transpose[1]
transCharA = "n" if columnMajorA else "t"
transCharB = "n" if columnMajorB else "t"
# open file
fileName="simt_%sgemm_%s%s_sm50.cu" % (precisionChar, transCharA, transCharB)
print("\n", fileName)
filePath = "%s%s" % (outputDir, fileName)
out = open(filePath, "w+")
# write file header
out.write("/***************************************************************************************************\n"
" * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. \n"
" * SPDX-License-Identifier: BSD-3-Clause \n"
" * \n"
" * Redistribution and use in source and binary forms, with or without \n"
" * modification, are permitted provided that the following conditions are met: \n"
" * \n"
" * 1. Redistributions of source code must retain the above copyright notice, this \n"
" * list of conditions and the following disclaimer. \n"
" * \n"
" * 2. Redistributions in binary form must reproduce the above copyright notice, \n"
" * this list of conditions and the following disclaimer in the documentation \n"
" * and/or other materials provided with the distribution. \n"
" * \n"
" * 3. Neither the name of the copyright holder nor the names of its \n"
" * contributors may be used to endorse or promote products derived from \n"
" * this software without specific prior written permission. \n"
" * \n"
" * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \n"
" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \n"
" * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \n"
" * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \n"
" * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \n"
" * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \n"
" * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \n"
" * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \n"
" * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \n"
" * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \n"
" *\n"
" **************************************************************************************************/\n"
"/*! \\file\n"
" \\brief Tests for device-wide GEMM interface\n"
"*/\n"
"\n"
"#include <iostream>\n"
"\n"
"#include \"cutlass/cutlass.h\"\n"
"#include \"cutlass/gemm/device/gemm.h\"\n"
"#include \"cutlass/numeric_types.h\"\n"
"\n"
"#include \"../../common/cutlass_unit_test.h\"\n"
"\n"
"#include \"cutlass/util/host_tensor.h\"\n"
"#include \"cutlass/util/tensor_view_io.h\"\n"
"#include \"cutlass/util/reference/host/tensor_fill.h\"\n"
"#include \"cutlass/util/reference/host/tensor_copy.h\"\n"
"#include \"cutlass/util/reference/host/tensor_compare.h\"\n"
"#include \"cutlass/util/reference/host/gemm.h\"\n"
"\n"
"#include \"testbed.h\"\n"
"\n")
foundThreadblockTilesL0 = {}
foundThreadblockTilesL1 = {}
########################################################################
# for each combination of tile sizes
########################################################################
for warpsPerThreadblock in warpsPerThreadblocks:
for warpShape in warpShapes:
warpThreadsM = 0
if warpShape[0] > warpShape[1]:
warpThreadsM = 8
else:
warpThreadsM = 4
                warpThreadsN = warpNumThreads // warpThreadsM
# skip shapes with conflicting rectangularity
# they are unlikely to be fastest
blockG = warpsPerThreadblock[0] > warpsPerThreadblock[1]
blockL = warpsPerThreadblock[0] < warpsPerThreadblock[1]
warpG = warpShape[0] > warpShape[1]
warpL = warpShape[0] < warpShape[1]
blockG2 = warpsPerThreadblock[0] > warpsPerThreadblock[1]*2
blockL2 = warpsPerThreadblock[0]*2 < warpsPerThreadblock[1]
warpG2 = warpShape[0] > warpShape[1]*2
warpL2 = warpShape[0]*2 < warpShape[1]
if blockG2 and warpL: continue
if blockL2 and warpG: continue
if warpG2 and blockL: continue
if warpL2 and blockG: continue
# check threadblock ratios and max
threadblockTile = [warpShape[0]*warpsPerThreadblock[0],
warpShape[1]*warpsPerThreadblock[1]]
if threadblockTile[0] * threadblockTile[1] > threadblockMaxElements: continue
if threadblockTile[0] > threadblockEdgeMax: continue
if threadblockTile[1] > threadblockEdgeMax: continue
totalThreads = warpNumThreads*warpsPerThreadblock[0]*warpsPerThreadblock[1]
# calculate unroll
                # ensure that every main-loop iteration performs at least one full load of A and B
                unrollMin = 8
                unrollMin0 = totalThreads // threadblockTile[0]
                unrollMin1 = totalThreads // threadblockTile[1]
unroll = max(unrollMin, unrollMin0, unrollMin1)
                threadTileM = warpShape[0] // warpThreadsM
                threadTileN = warpShape[1] // warpThreadsN
if threadTileM < 2 or threadTileN < 2: continue
if threadTileM*threadTileN*precisionBits > 8*8*32: continue
                # the epilogue currently requires threadblock N >= WarpNumThreads
if threadblockTile[1] < warpNumThreads: continue
# limit smem
smemBitsA = threadblockTile[0]*unroll*2*precisionBits
smemBitsB = threadblockTile[1]*unroll*2*precisionBits
smemKBytes = (smemBitsA+smemBitsB)/8/1024
if (smemKBytes > 48): continue
# test level 0
testLevel = -1
for tileId in range(0, len(threadblockTilesL0)):
tbTile = threadblockTilesL0[tileId]
if tbTile[0] == threadblockTile[0] and tbTile[1] == threadblockTile[1]:
if tuple(tbTile) not in foundThreadblockTilesL0:
testLevel = 0
numL0 += 1
foundThreadblockTilesL0[tuple(tbTile)] = True
# test level 1
if testLevel < 0:
if tuple(threadblockTile) not in foundThreadblockTilesL1:
testLevel = 1
numL1 += 1
foundThreadblockTilesL1[tuple(threadblockTile)] = True
# test level 2
if testLevel < 0:
testLevel = 2
numL2 += 1
################################################################
# write this tile to file
################################################################
print("%ix%ix%i__%ix%i_%ix%i_%ix%i L%i" % (
threadblockTile[0], threadblockTile[1], unroll,
threadTileM, threadTileN,
warpThreadsM, warpThreadsN,
warpsPerThreadblock[0], warpsPerThreadblock[1], testLevel))
out.write("////////////////////////////////////////////////////////////////////////////////\n"
"// Elements / Thread: %3i x %3i\n"
"// Threads / Warp: %3i x %3i\n"
"// Warps / Block: %3i x %3i\n"
"// Threadblock: %3i x %3i x %2i\n"
% ( threadTileM, threadTileN,
warpThreadsM, warpThreadsN,
warpsPerThreadblock[0], warpsPerThreadblock[1],
threadblockTile[0], threadblockTile[1], unroll
)
)
out.write("CUTLASS_TEST_L%i(SM50_device_%sgemm_%s%s, %ix%ix%i_%ix%ix1_%ix%i_%ix%i_%ix%i, {\n" % (
testLevel,
precisionChar,
transCharA,
transCharB,
threadblockTile[0],
threadblockTile[1],
unroll,
warpShape[0],
warpShape[1],
threadTileM,
threadTileN,
warpThreadsM,
warpThreadsN,
warpsPerThreadblock[0],
warpsPerThreadblock[1]
))
out.write(" using precision = %s;\n" % precisionType)
out.write(" using ThreadblockShape = cutlass::gemm::GemmShape<%i, %i, %i>;\n" % (
threadblockTile[0],
threadblockTile[1],
unroll))
out.write(" using WarpShape = cutlass::gemm::GemmShape<%i, %i, %i>;\n\n" % (
warpShape[0],
warpShape[1],
unroll))
out.write(" static int const kEpilogueElementsPerAccess = 1;\n"
" using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;\n"
" using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<\n"
" precision, kEpilogueElementsPerAccess, precision, precision>;\n\n")
out.write(" using Gemm = cutlass::gemm::device::Gemm<\n"
" precision, cutlass::layout::%sMajor,\n"
" precision, cutlass::layout::%sMajor,\n"
" precision, cutlass::layout::RowMajor,\n"
" precision,\n"
" cutlass::arch::OpClassSimt,\n"
" cutlass::arch::Sm50,\n"
" ThreadblockShape, WarpShape, InstructionShape,\n"
" EpilogueOutputOp,\n"
" cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,\n"
" 2 // Stages\n"
" >;\n" % (
"Column" if columnMajorA else "Row",
"Column" if columnMajorB else "Row",
))
out.write(" EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());\n"
"} )\n\n")
out.close()
print("NumKernels:", numL0, numL1, numL2)
| test/unit/gemm/device/simt_sm50.py/0 | {
"file_path": "test/unit/gemm/device/simt_sm50.py",
"repo_id": "test",
"token_count": 8209
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "testbed_utils.h"
#include "testbed_universal.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, bool Relu = false>
struct Testbed {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
typename Gemm::LayoutA::Stride stride_factor_A;
typename Gemm::LayoutB::Stride stride_factor_B;
typename Gemm::LayoutC::Stride stride_factor_C;
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A;
cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_C;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_D;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> reference_D;
//
// Methods
//
Testbed(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
stride_factor_A(typename Gemm::LayoutA::Stride()),
stride_factor_B(typename Gemm::LayoutB::Stride()),
stride_factor_C(typename Gemm::LayoutC::Stride()),
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
Testbed(
typename Gemm::LayoutA::Stride stride_factor_A_,
typename Gemm::LayoutB::Stride stride_factor_B_,
typename Gemm::LayoutC::Stride stride_factor_C_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
stride_factor_A(stride_factor_A_),
stride_factor_B(stride_factor_B_),
stride_factor_C(stride_factor_C_),
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 1;
scope_min = -1;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(problem_size.mk(), cutlass::layout::Affine2Layout_Factory<typename Gemm::LayoutA>::layout_factory(problem_size.mk(), stride_factor_A));
tensor_B.resize(problem_size.kn(), cutlass::layout::Affine2Layout_Factory<typename Gemm::LayoutB>::layout_factory(problem_size.kn(), stride_factor_B));
tensor_C.resize(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<typename Gemm::LayoutC>::layout_factory(problem_size.mn(), stride_factor_C));
tensor_D.resize(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<typename Gemm::LayoutC>::layout_factory(problem_size.mn(), stride_factor_C));
reference_D.resize(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<typename Gemm::LayoutC>::layout_factory(problem_size.mn(), stride_factor_C), false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1);
tensor_C.host_view().at(cutlass::make_Coord(0, 0)) = typename Gemm::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
}
  /// Compares the device-computed result against the host reference and writes both to a file if they differ
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
if (reference_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
  /// Verifies the device result against a host reference GEMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
cutlass::reference::host::Gemm<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute,
ElementAccumulator, typename Gemm::Operator>
reference_gemm;
reference_gemm(
problem_size,
alpha,
tensor_A.host_ref(),
tensor_B.host_ref(),
beta,
reference_D.host_ref(),
ElementAccumulator(0)
);
if (Relu) {
for (int i = 0; i < problem_size.m(); ++i) {
for (int j = 0; j < problem_size.n(); ++j) {
reference_D.at(cutlass::MatrixCoord(i, j)) =
((ElementCompute)reference_D.at(cutlass::MatrixCoord(i, j)) < (ElementCompute)0)
? (typename Gemm::ElementC)0
: reference_D.at(cutlass::MatrixCoord(i, j));
}
}
}
return compare_reference(problem_size, alpha, beta);
}
/// Determine if the CUDA device is sufficient to run the kernel
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size,
int split_k_slices = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0))
{
/*
std::cout << "\n-----------------------\n";
std::cout << "problem size: " << problem_size << "\n";
std::cout << "split_k_slices: " << split_k_slices << "\n";
std::cout << "alpha: " << alpha << "\n";
std::cout << "beta: " << beta << "\n";
std::cout << "-----------------------\n\n";
*/
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D.device_ref(),
{alpha, beta},
split_k_slices
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << std::endl;
}
return passed;
}
};
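//
// Usage sketch (illustrative, not part of the test suite): a single problem can also be
// exercised directly through Testbed<> instead of the TestAllGemm* sweeps below. The Gemm
// type shown relies on the library's default SIMT configuration and is only one plausible
// choice; any cutlass::gemm::device::Gemm specialization with a compatible epilogue works
// the same way.
//
//   using Gemm = cutlass::gemm::device::Gemm<
//     float, cutlass::layout::ColumnMajor,
//     float, cutlass::layout::ColumnMajor,
//     float, cutlass::layout::RowMajor>;
//
//   test::gemm::device::Testbed<Gemm> testbed;
//   bool passed = testbed.run({128, 128, 64}, /*split_k_slices=*/1, 1.0f, 0.0f);
//   EXPECT_TRUE(passed);
//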
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, bool Relu=false>
bool TestAllGemmBasic(
const typename Gemm::LayoutA::Stride& stride_factor_A = typename Gemm::LayoutA::Stride(),
const typename Gemm::LayoutB::Stride& stride_factor_B = typename Gemm::LayoutB::Stride(),
const typename Gemm::LayoutC::Stride& stride_factor_C = typename Gemm::LayoutC::Stride()) {
bool passed = true;
int const kMinimumOperandElementSize =
std::min(
int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));
int const kAlignment = cutlass::platform::is_same<
typename Gemm::OperatorClass,
cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;
// int8_t gemm alignment constraints
int const kAlignmentM = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment;
int const kAlignmentN = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementB, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::LayoutB, cutlass::layout::RowMajor>::value ? 4 : kAlignment;
int const kAlignmentK = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::ElementB, int8_t>::value &&
(cutlass::platform::is_same<typename Gemm::LayoutA, cutlass::layout::RowMajor>::value ||
cutlass::platform::is_same<typename Gemm::LayoutB, cutlass::layout::ColumnMajor>::value) ? 4 : kAlignment;
int problem_size_m[] = {kAlignmentM, 512 - 3 * kAlignmentM};
int problem_size_n[] = {kAlignmentN, 512 - 2 * kAlignmentN};
int problem_size_k[] = {
kAlignmentK, Gemm::ThreadblockShape::kK * (Gemm::kStages + 1) - kAlignmentK};
int split_k_slices[] = {
1, 2, 3
};
double problem_alpha[] = {
1
};
double problem_beta[] = {
2.0
};
Testbed<Gemm, Relu> testbed(stride_factor_A, stride_factor_B, stride_factor_C);
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int split_k : split_k_slices) {
if (!Gemm::kSplitKSerial && split_k > 1) {
continue;
}
if (split_k > 1 && k / Gemm::ThreadblockShape::kK < split_k) {
continue;
}
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
cutlass::gemm::GemmCoord problem_size(m, n, k);
passed = testbed.run(
problem_size,
split_k,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, bool Relu=false>
bool TestAllGemm(
const typename Gemm::LayoutA::Stride& stride_factor_A,
const typename Gemm::LayoutB::Stride& stride_factor_B = typename Gemm::LayoutB::Stride(),
const typename Gemm::LayoutC::Stride& stride_factor_C = typename Gemm::LayoutC::Stride())
{
// Test basic GEMM with non-default stride factors
return TestAllGemmBasic<Gemm, Relu>(stride_factor_A, stride_factor_B, stride_factor_C);
}
template <typename Gemm, bool Relu=false>
bool TestAllGemm()
{
#ifdef NDEBUG
// Non-debug builds also test basic GEMM with default stride factors
if (!TestAllGemmBasic<Gemm, Relu>()) {
return false;
}
#endif // NDEBUG
// Test universal GEMM
#if 0
// Define the universal kernel
using UniversalKernel = cutlass::gemm::kernel::GemmUniversal<
typename Gemm::GemmKernel::Mma, // Mma
typename Gemm::GemmKernel::Epilogue, // Epilogue
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<> // ThreadblockSwizzle
>;
#else
// Define the streamk universal kernel
using UniversalKernel = cutlass::gemm::kernel::GemmUniversalStreamk<
typename Gemm::GemmKernel::Mma, // Mma
typename Gemm::GemmKernel::Epilogue, // Epilogue
cutlass::gemm::threadblock::ThreadblockSwizzleStreamK // ThreadblockSwizzle
>;
#endif
// Define the universal adaptor
using UniversalGemm = cutlass::gemm::device::GemmUniversalAdapter<UniversalKernel>;
// Test universal GEMM
return TestAllGemmUniversal<UniversalGemm, Relu>();
}
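//
// Typical call site (sketch): the generated SM50 SIMT unit tests produced by
// test/unit/gemm/device/simt_sm50.py define a device-level Gemm with explicit tile shapes and
// then invoke the sweep above. Mirroring the generator output (the epilogue, swizzle and stage
// count are additionally spelled out there):
//
//   using Gemm = cutlass::gemm::device::Gemm<
//     float, cutlass::layout::ColumnMajor,
//     float, cutlass::layout::ColumnMajor,
//     float, cutlass::layout::RowMajor,
//     float,
//     cutlass::arch::OpClassSimt,
//     cutlass::arch::Sm50,
//     cutlass::gemm::GemmShape<128, 128, 8>,   // ThreadblockShape
//     cutlass::gemm::GemmShape<32, 64, 8>,     // WarpShape
//     cutlass::gemm::GemmShape<1, 1, 1>>;      // InstructionShape
//
//   EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
//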
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestGemmPerf(int iterations = 1) {
bool passed = true;
int problem_size_m[] = { 2048 };
int problem_size_n[] = { 4352 };
int problem_size_k[] = { 4096 };
int split_k_slices[] = { 1 };
double problem_alpha[] = { 1 };
double problem_beta[] = { 0.0 };
Testbed<Gemm> testbed;
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int split_k : split_k_slices) {
if (!Gemm::kSplitKSerial && split_k > 1) {
continue;
}
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
cutlass::gemm::GemmCoord problem_size(m, n, k);
for (int i = 0; i < iterations; i++){
passed = testbed.run(
problem_size,
split_k,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
}
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed.h/0 | {
"file_path": "test/unit/gemm/device/testbed.h",
"repo_id": "test",
"token_count": 8244
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide TRMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/blas3.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/reference/host/trmm.h"
#include "cutlass/util/reference/host/trmm_complex.h"
#include "cutlass/core_io.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Trmm>
struct TestbedTrmmUniversal {
using ElementA = typename Trmm::ElementA;
using ElementB = typename Trmm::ElementB;
using ElementC = typename Trmm::ElementC;
using ElementAccumulator = typename Trmm::ElementAccumulator;
using ElementCompute = typename Trmm::TrmmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_D;
uint64_t seed;
cutlass::HostTensor<typename Trmm::ElementA, typename Trmm::LayoutA> tensor_A;
cutlass::HostTensor<typename Trmm::ElementB, typename Trmm::LayoutB> tensor_B;
cutlass::HostTensor<typename Trmm::ElementC, typename Trmm::LayoutC> tensor_D;
cutlass::HostTensor<typename Trmm::ElementC, typename Trmm::LayoutC> reference_D;
//
// Methods
//
TestbedTrmmUniversal(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_D_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_D(init_D_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int mantissa_in_bits) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Trmm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_symmetric_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int mantissa_in_bits) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Trmm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillSymmetricRandomUniform(
view, seed, Trmm::kFillMode, scope_max, scope_min, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillSymmetricRandomGaussian(
view, seed, Trmm::kFillMode, 0, 0.5, mantissa_in_bits);
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
  /// Helper to initialize a tensor view (the wrong side of the diagonal is padded with zeros up to the alignment)
template <typename Element, typename Layout>
bool initialize_pad_diagonal_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int alignment) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Trmm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillPadDiagonalRandomUniform(
view, seed, Trmm::kFillMode, scope_max, scope_min, 0, alignment);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
EXPECT_TRUE(false) << "Gaussian distribution for pad diagonal not implemented";
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the TRMM workspace
//
if (Trmm::kSideMode == cutlass::SideMode::kLeft) {
tensor_A.resize(cutlass::make_Coord(problem_size.m(),problem_size.m()));
}
else if (Trmm::kSideMode == cutlass::SideMode::kRight) {
tensor_A.resize(cutlass::make_Coord(problem_size.n(),problem_size.n()));
}
tensor_B.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
//EXPECT_TRUE(initialize_symmetric_tensor(tensor_A.host_view(), init_A, seed + 2017));
//EXPECT_TRUE(initialize_pad_diagonal_tensor(tensor_A.host_view(), init_A, seed + 2017, Trmm::kAlignmentA));
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2017, cutlass::MantissaInBits<typename Trmm::ElementA>::bits));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2019, cutlass::MantissaInBits<typename Trmm::ElementB>::bits));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Trmm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Trmm::ElementB(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_D.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_D.sync_device();
}
  /// Compares the device-computed result against the host reference and writes both to a file if they differ
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
if (tensor_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
if (reference_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view());
bool passed = l2_norm < cutlass::MantissaInBits<typename Trmm::ElementA>::error;
return passed;
}
  /// Verifies the device result against a host reference TRMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha) {
//
// Verify
//
using HostReference = typename cutlass::platform::conditional<
(cutlass::platform::is_same<typename Trmm::ElementC,
cutlass::complex<double>
>::value ||
cutlass::platform::is_same<typename Trmm::ElementC,
cutlass::complex<float>
>::value
),
cutlass::reference::host::TrmmComplex<
typename Trmm::ElementA, typename Trmm::LayoutA,
Trmm::kTransformA,
Trmm::kSideMode, Trmm::kFillMode, Trmm::kDiagType,
typename Trmm::ElementB, typename Trmm::LayoutB,
Trmm::kTransformB,
typename Trmm::ElementC, typename Trmm::LayoutC,
ElementCompute,
ElementAccumulator>,
cutlass::reference::host::Trmm<
typename Trmm::ElementA, typename Trmm::LayoutA,
Trmm::kSideMode, Trmm::kFillMode, Trmm::kDiagType,
typename Trmm::ElementB, typename Trmm::LayoutB,
typename Trmm::ElementC, typename Trmm::LayoutC,
ElementCompute,
ElementAccumulator>
>::type;
HostReference reference_trmm;
reference_trmm(
problem_size,
alpha,
tensor_A.host_ref(),
tensor_B.host_ref(),
reference_D.host_ref(),
ElementAccumulator(0)
);
return compare_reference(problem_size, alpha);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Trmm::TrmmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord problem_size,
int batch_count = 1,
ElementCompute alpha = ElementCompute(1)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
#if 0
std::cout << "[TestbedTrmmUniversal::run()] problem(m, n, k): " << problem_size
<< " alpha: " << ElementCompute(alpha) << std::endl;
#endif
this->initialize(problem_size);
//
// Initialize the TRMM operator
//
    int batch_stride_A = 0;
    if (Trmm::kSideMode == cutlass::SideMode::kLeft)
      batch_stride_A = problem_size.m()*problem_size.m();
    else if (Trmm::kSideMode == cutlass::SideMode::kRight)
      batch_stride_A = problem_size.n()*problem_size.n();
typename Trmm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha},
tensor_A.device_data(),
tensor_B.device_data(),
tensor_D.device_data(),
batch_stride_A,
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_D.layout().stride(0)
};
Trmm trmm_op;
size_t workspace_size = Trmm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = trmm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the TRMM
//
status = trmm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha);
if (!passed) {
std::stringstream fname;
fname << "error_Trmm_device_"
<< "fill_mode_"
<< (Trmm::kFillMode == cutlass::FillMode::kLower ? "lower_" :
(Trmm::kFillMode == cutlass::FillMode::kUpper ? "upper_" : "invalid_"))
<< "side_mode_"
<< (Trmm::kSideMode == cutlass::SideMode::kLeft ? "left_" :
(Trmm::kSideMode == cutlass::SideMode::kRight ? "right_" : "invalid_"))
<< "mnk_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Trmm::ThreadblockShape::kM << "x"
<< Trmm::ThreadblockShape::kN << "x"
<< Trmm::ThreadblockShape::kK << "_"
<< Trmm::WarpShape::kM << "x"
<< Trmm::WarpShape::kN << "x"
<< Trmm::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nD reference:\n" << reference_D.host_view() << "\n"
<< "\nD computed:\n" << tensor_D.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Trmm>
bool TestTrmmUniversal(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmUniversalMode mode,
int batch_count,
double alpha = 1.0) {
bool passed = true;
TestbedTrmmUniversal<Trmm> testbed;
using ElementCompute = typename Trmm::EpilogueOutputOp::ElementCompute;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha)
);
return passed;
}
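//
// Usage sketch (illustrative): assuming a device-level TRMM type has already been composed
// elsewhere (for example via cutlass::gemm::device::Trmm<...>) and aliased as Trmm, a single
// left-sided problem can be checked like this. The problem shape and alpha are arbitrary, and
// k must equal m for SideMode::kLeft (or n for SideMode::kRight), as in TestAllTrmmUniversal
// below.
//
//   cutlass::gemm::GemmCoord problem(256, 128, 256);
//   bool passed = test::gemm::device::TestTrmmUniversal<Trmm>(
//     problem,
//     cutlass::gemm::GemmUniversalMode::kGemm,
//     /*batch_count=*/1,
//     /*alpha=*/1.5);
//   EXPECT_TRUE(passed);
//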
template <typename Trmm>
bool TestAllTrmmUniversal() {
bool passed = true;
int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Trmm::ElementA>::value);
int const kAlignment = cutlass::platform::is_same<
typename Trmm::OperatorClass,
cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;
// int8_t gemm alignment constraints
int const kAlignmentM = cutlass::platform::is_same<typename Trmm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Trmm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Trmm::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment;
int const kAlignmentN = kAlignmentM;
int const kAlignmentK = cutlass::platform::is_same<typename Trmm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Trmm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Trmm::LayoutA, cutlass::layout::RowMajor>::value
? 4 : kAlignment;
cutlass::gemm::GemmUniversalMode modes[] = {
cutlass::gemm::GemmUniversalMode::kGemm,
};
int problem_size_m[] = {
kAlignmentK,
Trmm::ThreadblockShape::kK * Trmm::kStages - kAlignmentK,
Trmm::ThreadblockShape::kK * Trmm::kStages * 3 - kAlignmentK
};
int problem_size_n[] = {
kAlignmentN, 512 - 2*kAlignmentN
};
  int batch_counts[] = {       // may be interpreted as batch count or split-K slices
1 // Just running one batch for now (removing 2, 3, 5, 7)
};
double problem_alpha[] = {
1.0, 2.0
};
using ElementCompute = typename Trmm::EpilogueOutputOp::ElementCompute;
for (cutlass::gemm::GemmUniversalMode mode : modes) {
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int batch_count : batch_counts) {
for (auto alpha : problem_alpha) {
int k = 0;
if (Trmm::kSideMode == cutlass::SideMode::kLeft)
k = m;
else if (Trmm::kSideMode == cutlass::SideMode::kRight)
k = n;
if (mode == cutlass::gemm::GemmUniversalMode::kGemm ||
mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) {
#if 0
// skip very small K problems
if (k / batch_count < 2 * Trmm::ThreadblockShape::kK) {
continue;
}
#endif
}
cutlass::gemm::GemmCoord problem_size(m, n, k);
TestbedTrmmUniversal<Trmm> testbed;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha)
);
if (!passed) {
return false;
}
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_trmm_universal.h/0 | {
"file_path": "test/unit/gemm/device/testbed_trmm_universal.h",
"repo_id": "test",
"token_count": 8616
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit testbed for kernel-level GEMM
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/vector.h"
#include "cutlass/numeric_types.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/cutlass.h"
#include "cutlass/platform/platform.h"
namespace test {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Mma>
__global__ void kernel_mma(cutlass::gemm::GemmCoord problem_size,
typename Mma::IteratorA::Params params_A,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::Params params_B,
typename Mma::IteratorB::TensorRef ref_B,
typename Mma::ElementC **ptr_C,
typename Mma::LayoutC::Stride::Index ldc) {
// Shared storage needed by threadblock-scoped matrix multiply-accumulate
__shared__ typename Mma::SharedStorage shared_storage;
// Compute threadblock location
cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y),
0};
cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k()};
cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(),
tb_tile_offset.n() * Mma::Shape::kN};
// Compute position within threadblock
int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(params_A, ref_A.data(),
{problem_size.m(), problem_size.k()},
tb_thread_id, tb_offset_A);
typename Mma::IteratorB iterator_B(params_B, ref_B.data(),
{problem_size.k(), problem_size.n()},
tb_thread_id, tb_offset_B);
int warp_id = threadIdx.y;
int lane_id = threadIdx.x;
int partitionsK_idx = warp_id / (Mma::WarpCount::kM * Mma::WarpCount::kN);
  // Construct threadblock-scoped matrix multiply
Mma mma(shared_storage, tb_thread_id, warp_id, threadIdx.x);
typename Mma::FragmentC accum;
accum.clear();
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
// Output results
typename Mma::Operator::IteratorC iterator_C({ptr_C[partitionsK_idx], ldc}, lane_id);
int warp_idx_mn = warp_id % (Mma::WarpCount::kM * Mma::WarpCount::kN);
iterator_C.add_tile_offset(
{(tb_tile_offset.m() * Mma::WarpCount::kM) +
(warp_idx_mn % Mma::WarpCount::kM),
(tb_tile_offset.n() * Mma::WarpCount::kN) +
(warp_idx_mn / Mma::WarpCount::kM)});
iterator_C.store(accum);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Threadblock-level matrix multiply-accumulate
typename MmaCore_>
struct Testbed {
/// Threadblock-level GEMM implementation
using MmaCore = MmaCore_;
using ThreadblockShape = typename MmaCore::Shape;
using WarpShape = typename MmaCore::WarpShape;
using InstructionShape = typename MmaCore::InstructionShape;
using ElementA = typename MmaCore::ElementA;
using LayoutA = typename MmaCore::LayoutA;
using ElementB = typename MmaCore::ElementB;
using LayoutB = typename MmaCore::LayoutB;
using ElementC = typename MmaCore::ElementC;
using LayoutC = typename MmaCore::LayoutC;
// Define iterators over tiles from the A operand
static const bool use_idp4a = cutlass::platform::is_same<ElementA, int8_t>::value &&
cutlass::platform::is_same<ElementB, int8_t>::value &&
cutlass::platform::is_same<typename MmaCore::OperatorClass, cutlass::arch::OpClassSimt>::value;
static const bool transposeA = cutlass::platform::is_same< LayoutA, cutlass::layout::ColumnMajor >::value;
static const bool transposeB = cutlass::platform::is_same< LayoutB, cutlass::layout::RowMajor >::value;
using IteratorA = typename cutlass::platform::conditional< use_idp4a,
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA> ,
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA>
>::type;
// Define iterators over tiles from the B operand
using IteratorB = typename cutlass::platform::conditional< use_idp4a,
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB> ,
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB>
>::type;
// Define the threadblock-scoped pipelined matrix multiply
using Mma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementC, LayoutC,
typename MmaCore::MmaPolicy>;
static int const kPartitionsK = MmaCore::MmaPolicy::kPartitionsK;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> matrix_A;
cutlass::HostTensor<ElementB, LayoutB> matrix_B;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_computed[kPartitionsK];
cutlass::HostTensor<ElementC, LayoutC> matrix_C_reference;
cutlass::HostTensor<ElementC*, cutlass::layout::PackedVectorLayout> matrix_C_pointers;
cutlass::gemm::GemmCoord problem_size;
float alpha, beta;
//
// Methods
//
/// Allocates workspace in device memory
Testbed(int m, int n, int k, float alpha_, float beta_)
: problem_size(m, n, k), alpha(alpha_), beta(beta_) {
matrix_A.reset(cutlass::make_Coord(m, k));
matrix_B.reset(cutlass::make_Coord(k, n));
CUTLASS_PRAGMA_UNROLL
for(int k = 0; k < kPartitionsK; k++)
matrix_C_computed[k].reset(cutlass::make_Coord(m, n));
matrix_C_reference.reset(cutlass::make_Coord(m, n), false);
matrix_C_pointers.reset(cutlass::Coord<1>(kPartitionsK));
}
/// Runs the test
bool run(
dim3 grid, dim3 block,
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_A.host_data(),
matrix_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_B.host_data(),
matrix_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_B.host_view());
} else {
return false;
}
CUTLASS_PRAGMA_UNROLL
for(int k = 0; k < kPartitionsK; k++)
cutlass::reference::host::TensorFill(matrix_C_computed[k].host_view());
cutlass::reference::host::TensorFill(matrix_C_reference.host_view());
matrix_A.sync_device();
matrix_B.sync_device();
CUTLASS_PRAGMA_UNROLL
for(int k = 0; k < kPartitionsK; k++)
matrix_C_computed[k].sync_device();
typename IteratorA::Params params_A(matrix_A.layout());
typename IteratorB::Params params_B(matrix_B.layout());
CUTLASS_PRAGMA_UNROLL
for(int k = 0; k < kPartitionsK; k++)
matrix_C_pointers.at(cutlass::Coord<1>(k)) = matrix_C_computed[k].device_data();
matrix_C_pointers.sync_device();
test::gemm::threadblock::kernel_mma<Mma><<<grid, block>>>(
problem_size, params_A, matrix_A.device_ref(), params_B,
matrix_B.device_ref(), matrix_C_pointers.device_data(),
matrix_C_computed[0].layout().stride(0));
//
// Check error code
//
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< " kernel error: " << cudaGetErrorString(result);
CUTLASS_PRAGMA_UNROLL
for(int k = 0; k < kPartitionsK; k++)
matrix_C_computed[k].sync_host();
    // TODO: this is temporary. It will be removed once slicing can do the
    // reduction itself.
//
// Reduce matrix_C_computed
//
CUTLASS_PRAGMA_UNROLL
for(int k = 1; k < kPartitionsK; k++) {
CUTLASS_PRAGMA_UNROLL
for(int m = 0; m < matrix_C_computed[0].extent().row(); m++){
CUTLASS_PRAGMA_UNROLL
for(int n = 0; n < matrix_C_computed[0].extent().column(); n++){
matrix_C_computed[0].at({m, n}) += matrix_C_computed[k].at({m, n});
}
}
}
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC,
typename MmaCore::Operator>
reference_gemm;
reference_gemm(
problem_size, ElementC(alpha), matrix_A.host_view(),
matrix_B.host_view(), ElementC(beta), matrix_C_reference.host_view());
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed[0].host_view(), matrix_C_reference.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("mma_pipelined_testbed_errors.txt");
output
<< "A:\n" << matrix_A.host_view() << "\n"
<< "B:\n" << matrix_B.host_view() << "\n"
<< "Reference:\n"
<< matrix_C_reference.host_view() << "\n"
<< "Computed:\n"
<< matrix_C_computed[0].host_view() << "\n";
}
return passed;
}
};
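//
// Usage sketch (illustrative): a unit test typically builds the MmaCore through
// cutlass::gemm::threadblock::DefaultMmaCore and launches one threadblock per output tile with
// blockDim.x equal to the warp size and blockDim.y equal to the number of warps. The SIMT
// configuration below is one plausible choice, not a requirement of this testbed.
//
//   using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
//     cutlass::gemm::GemmShape<64, 64, 8>,    // ThreadblockShape
//     cutlass::gemm::GemmShape<32, 32, 8>,    // WarpShape
//     cutlass::gemm::GemmShape<1, 1, 1>,      // InstructionShape
//     float, cutlass::layout::ColumnMajor,    // A
//     float, cutlass::layout::RowMajor,       // B
//     float, cutlass::layout::RowMajor,       // C
//     cutlass::arch::OpClassSimt>;
//
//   dim3 grid(1, 1);       // one 64x64 output tile
//   dim3 block(32, 4, 1);  // 32 threads per warp x (2x2) warps
//
//   test::gemm::threadblock::Testbed<MmaCore>(64, 64, 32, /*alpha=*/1.f, /*beta=*/0.f)
//     .run(grid, block);
//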
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
| test/unit/gemm/threadblock/mma_pipelined_testbed_slicedk.h/0 | {
"file_path": "test/unit/gemm/threadblock/mma_pipelined_testbed_slicedk.h",
"repo_id": "test",
"token_count": 5699
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
    \brief Defines the OperationProfiler abstract base class used to profile CUTLASS math operations
*/
#pragma once
#include <vector>
#include <string>
#include <memory>
#include <unordered_map>
// CUTLASS includes
#include "cutlass/trace.h"
// CUTLASS Library includes
#include "cutlass/library/library.h"
#include "cutlass/library/util.h"
#include "cutlass/library/manifest.h"
// Profiler includes
#include "options.h"
#include "device_context.h"
#include "performance_result.h"
#include "performance_report.h"
#include "problem_space.h"
#include "debug.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Abstract base class for each math function
class OperationProfiler {
protected:
//
// Data members
//
/// Top-level operation kind
library::OperationKind kind_;
/// Human readable description
std::string description_;
/// Arguments parsed from command line
ArgumentDescriptionVector arguments_;
/// List of providers used to verify and compare each result
ProviderVector verification_providers_;
/// Model performance result initialized by the operation profiler with workload statistics
/// and reasonable default state.
PerformanceResult model_result_;
/// Performance result vector constructed by profiling the operation
PerformanceResultVector results_;
public:
//
// Methods
//
/// Ctor
OperationProfiler();
OperationProfiler(
Options const &options,
library::OperationKind kind,
ArgumentDescriptionVector const &arguments = ArgumentDescriptionVector(),
ProviderVector const & verification_providers = ProviderVector());
/// Destructor
virtual ~OperationProfiler();
/// Obtains the operation kind
library::OperationKind kind() const { return kind_; }
/// Gets the schema description
std::string const &description() const;
/// Returns a reference to the arguments
ArgumentDescriptionVector const &arguments() const { return arguments_; }
public:
//
// Basic overrides
//
/// Prints usage statement for the math function
virtual void print_usage(std::ostream &out) const;
/// Prints examples
virtual void print_examples(std::ostream &out) const =0;
/// Entry point to profile all operations in the manifest
virtual int profile_all(
Options const &options,
library::Manifest const &manifest,
DeviceContext &device_context);
public:
//
// Operation-specific phases of verification and profiling
//
/// Extracts the problem dimensions
virtual Status initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) = 0;
/// Initializes workspace
virtual Status initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) = 0;
/// Verifies CUTLASS against references
virtual bool verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) = 0;
/// Measures performance results
virtual bool profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) = 0;
public:
//
// Static helpers
//
/// Sleep for a given duration in ms
static void sleep(int sleep_duration);
/// Returns true if the current operation description satisfies the problem space
static bool satisfies(
library::OperationDescription const &op_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Compares tensors for equality
static Disposition compare_tensors(
Options const &options,
DeviceAllocation &experimental,
DeviceAllocation &reference,
int64_t count = 0);
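  /// Saves the workspace tensors to files so they can be inspected offline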
static void save_workspace(
DeviceContext &device_context,
Options const &options,
library::OperationDescription const &desc,
library::Provider provider,
library::Provider verification_provider = library::Provider::kInvalid);
/// Helper to set a performance result member
static void set_argument(
PerformanceResult &result,
char const *name,
ProblemSpace const &problem_space,
std::string const &value);
/// Helper to set a performance result member
static void set_argument(
PerformanceResult &result,
char const *name,
ProblemSpace const &problem_space,
int64_t value);
protected:
/// Sets operation description
static void initialize_result_(
PerformanceResult &result,
library::OperationDescription const &operation_desc,
ProblemSpace const &problem_space);
/// Method to profile an initialized CUTLASS operation
virtual Status profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace);
private:
  /// Returns true if filter_string matches within operation_name
bool find_string_matches_(
std::string const &filter_string,
std::string const &operation_name);
};
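// Illustrative sketch (comment only, not part of the original source): a concrete
// profiler derives from OperationProfiler and implements the operation-specific
// phases declared above. The class name and operation kind below are hypothetical
// placeholders.
//
//   class MyKernelProfiler : public OperationProfiler {
//   public:
//     explicit MyKernelProfiler(Options const &options)
//       : OperationProfiler(options, library::OperationKind::kGemm) {}
//
//     void print_examples(std::ostream &out) const override { out << "..."; }
//
//     Status initialize_configuration(
//       Options const &, PerformanceReport &, DeviceContext &,
//       library::Operation const *, ProblemSpace const &,
//       ProblemSpace::Problem const &) override { return Status::kSuccess; }
//
//     // initialize_workspace(), verify_cutlass(), and profile() are implemented
//     // analogously for the specific operation kind.
//   };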
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Vector of owning operation profilers
using OperationProfilerVector = std::vector<std::unique_ptr<OperationProfiler>>;
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/include/cutlass/profiler/operation_profiler.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/operation_profiler.h",
"repo_id": "tools",
"token_count": 2076
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Execution environment: device memory allocation and initialization helpers used by the CUTLASS profiler
*/
#include <cstring>
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/library/util.h"
#include "cutlass/profiler/device_allocation.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
size_t DeviceAllocation::bytes(library::NumericTypeID type, size_t capacity) {
return size_t(cutlass::library::sizeof_bits(type)) * capacity / 8;
}
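// Worked example (comment only, not part of the original source): bytes() converts
// an element count to a byte count using the element's bit width, so sub-byte types
// are handled naturally:
//   bytes(library::NumericTypeID::kF16, 1024) == 1024 * 16 / 8 == 2048
//   bytes(library::NumericTypeID::kS4,  1024) == 1024 *  4 / 8 ==  512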
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Layout>
static std::vector<int64_t> get_packed_layout_stride(std::vector<int> const &extent) {
typename Layout::TensorCoord extent_coord;
typename Layout::Stride stride_coord;
if (extent.size() != size_t(Layout::kRank)) {
throw std::runtime_error("Layout does not have same rank as extent vector.");
}
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
std::vector<int64_t> stride;
stride.resize(Layout::kStrideRank, 0);
Layout layout = Layout::packed(extent_coord);
stride_coord = layout.stride();
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride.at(i) = (int64_t)stride_coord[i];
}
return stride;
}
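// Illustrative example (comment only): the packed stride is derived from the extent
// by Layout::packed(). With CUTLASS's conventions, an extent of {128, 64}
// (rows x columns) yields
//   get_packed_layout_stride<cutlass::layout::ColumnMajor>({128, 64}) -> {128}  // ld = rows
//   get_packed_layout_stride<cutlass::layout::RowMajor>({128, 64})    -> {64}   // ld = columns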
/// Returns the stride of a packed layout
std::vector<int64_t> DeviceAllocation::get_packed_layout(
library::LayoutTypeID layout_id,
std::vector<int> const &extent) {
std::vector<int64_t> stride;
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajor>(extent);
break;
case library::LayoutTypeID::kRowMajor:
stride = get_packed_layout_stride<cutlass::layout::RowMajor>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kTensorNCHW:
stride = get_packed_layout_stride<cutlass::layout::TensorNCHW>(extent);
break;
case library::LayoutTypeID::kTensorNHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNHWC>(extent);
break;
case library::LayoutTypeID::kTensorNDHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNDHWC>(extent);
break;
case library::LayoutTypeID::kTensorNC32HW32:
stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<32>>(extent);
break;
case library::LayoutTypeID::kTensorNC64HW64:
stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<64>>(extent);
break;
case library::LayoutTypeID::kTensorC32RSK32:
stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<32>>(extent);
break;
case library::LayoutTypeID::kTensorC64RSK64:
stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<64>>(extent);
break;
default: break;
}
return stride;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that uses CUTLASS Layout functions to construct a layout object in the provided buffer and compute its capacity
template <typename Layout>
static size_t construct_layout_(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride) {
if (extent.size() != Layout::kRank) {
throw std::runtime_error(
"Layout must have same rank as extent vector.");
}
if (Layout::kStrideRank && stride.empty()) {
stride = get_packed_layout_stride<Layout>(extent);
return construct_layout_<Layout>(
bytes,
layout_id,
extent,
stride);
}
else if (Layout::kStrideRank && stride.size() != Layout::kStrideRank) {
throw std::runtime_error(
"Layout requires either empty stride or stride vector matching Layout::kStrideRank");
}
typename Layout::Stride stride_coord;
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride_coord[i] = (int)stride.at(i);
}
typename Layout::TensorCoord extent_coord;
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
// Construct the CUTLASS layout object from the stride object
Layout layout(stride_coord);
// Pack it into bytes
if (bytes) {
*reinterpret_cast<Layout *>(bytes) = layout;
}
// Return capacity
size_t capacity_ = layout.capacity(extent_coord);
return capacity_;
}
/// Returns the capacity (in elements) needed by the specified layout
size_t DeviceAllocation::construct_layout(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride) {
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
return construct_layout_<cutlass::layout::ColumnMajor>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajor:
return construct_layout_<cutlass::layout::RowMajor>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK2:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<2>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK2:
return construct_layout_<cutlass::layout::RowMajorInterleaved<2>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK4:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<4>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK4:
return construct_layout_<cutlass::layout::RowMajorInterleaved<4>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK16:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<16>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK16:
return construct_layout_<cutlass::layout::RowMajorInterleaved<16>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK32:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK32:
return construct_layout_<cutlass::layout::RowMajorInterleaved<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK64:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK64:
return construct_layout_<cutlass::layout::RowMajorInterleaved<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNCHW:
      return construct_layout_<cutlass::layout::TensorNCHW>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNHWC:
return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNDHWC:
return construct_layout_<cutlass::layout::TensorNDHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC32HW32:
return construct_layout_<cutlass::layout::TensorNCxHWx<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC64HW64:
return construct_layout_<cutlass::layout::TensorNCxHWx<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC32RSK32:
return construct_layout_<cutlass::layout::TensorCxRSKx<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC64RSK64:
return construct_layout_<cutlass::layout::TensorCxRSKx<64>>(bytes, layout_id, extent, stride);
default: break;
}
return 0;
}
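// Usage sketch (comment only, not part of the original source): construct_layout()
// packs the layout object into 'bytes' when the pointer is non-null and returns the
// element capacity implied by the extent; an empty stride vector is replaced by the
// packed stride. The extent values below are arbitrary examples.
//
//   std::vector<int> extent = {128, 64};
//   std::vector<int64_t> stride;    // empty -> packed stride {128} is derived
//   size_t capacity = DeviceAllocation::construct_layout(
//       nullptr, library::LayoutTypeID::kColumnMajor, extent, stride);
//   // capacity == 128 * 64 == 8192 elements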
/////////////////////////////////////////////////////////////////////////////////////////////////
DeviceAllocation::DeviceAllocation():
type_(library::NumericTypeID::kInvalid),
batch_stride_(0),
capacity_(0),
pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown),
batch_count_(1) {
}
DeviceAllocation::DeviceAllocation(
library::NumericTypeID type,
size_t capacity
):
type_(type), batch_stride_(capacity), capacity_(capacity), pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown), batch_count_(1) {
cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type, capacity));
if (result != cudaSuccess) {
type_ = library::NumericTypeID::kInvalid;
capacity_ = 0;
pointer_ = nullptr;
throw std::bad_alloc();
}
}
DeviceAllocation::DeviceAllocation(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count
):
type_(type), batch_stride_(size_t(0)), capacity_(size_t(0)), pointer_(nullptr), batch_count_(1) {
reset(type, layout_id, extent, stride, batch_count);
}
DeviceAllocation::~DeviceAllocation() {
if (pointer_) {
cudaFree(pointer_);
}
}
DeviceAllocation &DeviceAllocation::reset() {
if (pointer_) {
cudaFree(pointer_);
}
type_ = library::NumericTypeID::kInvalid;
batch_stride_ = 0;
capacity_ = 0;
pointer_ = nullptr;
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
tensor_ref_buffer_.clear();
batch_count_ = 1;
return *this;
}
DeviceAllocation &DeviceAllocation::reset(library::NumericTypeID type, size_t capacity) {
reset();
type_ = type;
batch_stride_ = capacity;
capacity_ = capacity;
cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type_, capacity_));
if (result != cudaSuccess) {
throw std::bad_alloc();
}
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
batch_count_ = 1;
tensor_ref_buffer_.resize(sizeof(pointer_), 0);
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
/// Allocates memory for a given layout and tensor
DeviceAllocation &DeviceAllocation::reset(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count) {
reset();
tensor_ref_buffer_.resize(sizeof(pointer_) + (sizeof(int64_t) * library::get_layout_stride_rank(layout_id)), 0);
type_ = type;
layout_ = layout_id;
stride_ = stride;
extent_ = extent;
batch_count_ = batch_count;
batch_stride_ = construct_layout(
tensor_ref_buffer_.data() + sizeof(pointer_),
layout_id,
extent,
stride_);
capacity_ = batch_stride_ * batch_count_;
cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type, capacity_));
if (result != cudaSuccess) {
throw std::bad_alloc();
}
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
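// Worked example (comment only, assuming a packed column-major tensor; the numbers
// are arbitrary): after
//
//   DeviceAllocation alloc;
//   alloc.reset(library::NumericTypeID::kF32, library::LayoutTypeID::kColumnMajor,
//               {128, 64}, {}, /*batch_count=*/3);
//
// the allocation holds
//   batch_stride_ == 128 * 64 == 8192 elements     (one tensor)
//   capacity_     == 8192 * 3 == 24576 elements    (all batches)
//   bytes()       == 24576 * 32 / 8 == 98304 bytes
// and tensor_ref_buffer_ stores the device pointer followed by the packed stride,
// which downstream code can reinterpret as a pointer-plus-stride tensor reference.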
bool DeviceAllocation::good() const {
return (capacity_ && pointer_);
}
library::NumericTypeID DeviceAllocation::type() const {
return type_;
}
void *DeviceAllocation::data() const {
return pointer_;
}
void *DeviceAllocation::batch_data(int batch_idx) const {
return static_cast<char *>(data()) + batch_stride_bytes() * batch_idx;
}
library::LayoutTypeID DeviceAllocation::layout() const {
return layout_;
}
std::vector<int64_t> const & DeviceAllocation::stride() const {
return stride_;
}
/// Gets the extent vector
std::vector<int> const & DeviceAllocation::extent() const {
return extent_;
}
/// Gets the number of adjacent tensors in memory
int DeviceAllocation::batch_count() const {
return batch_count_;
}
/// Gets the stride (in units of elements) between items
int64_t DeviceAllocation::batch_stride() const {
return batch_stride_;
}
/// Gets the stride (in units of bytes) between items
int64_t DeviceAllocation::batch_stride_bytes() const {
return bytes(type_, batch_stride_);
}
size_t DeviceAllocation::capacity() const {
return capacity_;
}
size_t DeviceAllocation::bytes() const {
return bytes(type_, capacity_);
}
/// Copies from an equivalent-sized tensor in device memory
void DeviceAllocation::copy_from_device(void const *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
/// Copies from an equivalent-sized tensor in host memory
void DeviceAllocation::copy_from_host(void const *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
throw std::runtime_error("Failed host-to-device copy");
}
}
/// Copies to an equivalent-sized tensor in host memory
void DeviceAllocation::copy_to_host(void *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
cudaError_t result = cudaMemcpy(ptr, data(), bytes(), cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
throw std::runtime_error("Failed device-to-host copy");
}
}
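// Usage sketch (comment only; 'alloc' is a placeholder DeviceAllocation): the copy
// helpers move whole allocations, and the byte count always comes from the
// allocation itself, so the peer buffer must be at least bytes() in size.
//
//   std::vector<uint8_t> host_buffer(alloc.bytes());
//   alloc.copy_from_host(host_buffer.data());   // host -> device
//   alloc.copy_to_host(host_buffer.data());     // device -> host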
void DeviceAllocation::initialize_random_device(int seed, Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
  // Calls to CURAND are instantiated here; this is why this file takes a long time
  // to compile.
switch (type_) {
case library::NumericTypeID::kF16:
cutlass::reference::device::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::device::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::device::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::device::BlockFillRandom<float>(
reinterpret_cast<float *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::device::BlockFillRandom<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::device::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::device::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE4M3:
cutlass::reference::device::BlockFillRandom<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::device::BlockFillRandom<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::device::BlockFillRandom<double>(
reinterpret_cast<double *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::device::BlockFillRandom<complex<double>>(
reinterpret_cast<complex<double> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::device::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::device::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::device::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::device::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::device::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::device::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::device::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::device::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::device::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::device::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::device::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t *>(pointer_),
capacity_,
seed,
dist
);
break;
default: break;
}
}
void DeviceAllocation::initialize_random_host(int seed, Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::host::BlockFillRandom<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::host::BlockFillRandom<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFillRandom<float>(
reinterpret_cast<float *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFillRandom<double>(
reinterpret_cast<double *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::host::BlockFillRandom<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
default: break;
}
copy_from_host(host_data.data());
}
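// Usage sketch (comment only, assuming cutlass::Distribution and its set_uniform()
// helper from cutlass/util/distribution.h; 'alloc' is a placeholder allocation):
//
//   Distribution dist;
//   dist.set_uniform(-4, 4);                    // uniform values in [-4, 4]
//   alloc.initialize_random_host(/*seed=*/2024, dist);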
void DeviceAllocation::initialize_sequential_device(Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::device::BlockFillSequential<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(pointer_),
capacity_,
static_cast<cutlass::float_e4m3_t>(dist.sequential.delta),
static_cast<cutlass::float_e4m3_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::device::BlockFillSequential<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(pointer_),
capacity_,
static_cast<cutlass::float_e5m2_t>(dist.sequential.delta),
static_cast<cutlass::float_e5m2_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::device::BlockFillSequential<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(pointer_),
capacity_,
static_cast<cutlass::half_t>(dist.sequential.delta),
static_cast<cutlass::half_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::device::BlockFillSequential<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(pointer_),
capacity_,
static_cast<cutlass::bfloat16_t>(dist.sequential.delta),
static_cast<cutlass::bfloat16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::device::BlockFillSequential<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(pointer_),
capacity_,
static_cast<cutlass::tfloat32_t>(dist.sequential.delta),
static_cast<cutlass::tfloat32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::device::BlockFillSequential<float>(
reinterpret_cast<float *>(pointer_),
capacity_,
static_cast<float>(dist.sequential.delta),
static_cast<float>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(pointer_),
capacity_,
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.delta)),
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(pointer_),
capacity_,
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.delta)),
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_),
capacity_,
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.delta)),
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::device::BlockFillSequential<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(pointer_),
capacity_,
cutlass::complex<float>(
static_cast<float>(dist.sequential.delta)),
cutlass::complex<float>(
static_cast<float>(dist.sequential.start))
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::device::BlockFillSequential<double>(
reinterpret_cast<double *>(pointer_),
capacity_,
static_cast<double>(dist.sequential.delta),
static_cast<double>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::device::BlockFillSequential<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(pointer_),
capacity_,
cutlass::complex<double>(
static_cast<double>(dist.sequential.delta)),
cutlass::complex<double>(
static_cast<double>(dist.sequential.start))
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::device::BlockFillSequential<int2b_t>(
reinterpret_cast<int2b_t *>(pointer_),
capacity_,
static_cast<int2b_t>(dist.sequential.delta),
static_cast<int2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::device::BlockFillSequential<int4b_t>(
reinterpret_cast<int4b_t *>(pointer_),
capacity_,
static_cast<int4b_t>(dist.sequential.delta),
static_cast<int4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::device::BlockFillSequential<int8_t>(
reinterpret_cast<int8_t *>(pointer_),
capacity_,
static_cast<int8_t>(dist.sequential.delta),
static_cast<int8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::device::BlockFillSequential<int16_t>(
reinterpret_cast<int16_t *>(pointer_),
capacity_,
static_cast<int16_t>(dist.sequential.delta),
static_cast<int16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::device::BlockFillSequential<int32_t>(
reinterpret_cast<int32_t *>(pointer_),
capacity_,
static_cast<int32_t>(dist.sequential.delta),
static_cast<int32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::device::BlockFillSequential<int64_t>(
reinterpret_cast<int64_t *>(pointer_),
capacity_,
static_cast<int64_t>(dist.sequential.delta),
static_cast<int64_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::device::BlockFillSequential<uint1b_t>(
reinterpret_cast<uint1b_t *>(pointer_),
capacity_,
static_cast<uint1b_t>(dist.sequential.delta),
static_cast<uint1b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::device::BlockFillSequential<uint2b_t>(
reinterpret_cast<uint2b_t *>(pointer_),
capacity_,
static_cast<uint2b_t>(dist.sequential.delta),
static_cast<uint2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::device::BlockFillSequential<uint4b_t>(
reinterpret_cast<uint4b_t *>(pointer_),
capacity_,
static_cast<uint4b_t>(dist.sequential.delta),
static_cast<uint4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::device::BlockFillSequential<uint8_t>(
reinterpret_cast<uint8_t *>(pointer_),
capacity_,
static_cast<uint8_t>(dist.sequential.delta),
static_cast<uint8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillSequential<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
static_cast<uint16_t>(dist.sequential.delta),
static_cast<uint16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillSequential<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
static_cast<uint32_t>(dist.sequential.delta),
static_cast<uint32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::device::BlockFillSequential<uint64_t>(
reinterpret_cast<uint64_t *>(pointer_),
capacity_,
static_cast<uint64_t>(dist.sequential.delta),
static_cast<uint64_t>(dist.sequential.start)
);
break;
default: break;
}
}
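// Worked example (comment only): a sequential fill produces
//   start, start + delta, start + 2 * delta, ...
// so with dist.sequential.start == 1 and dist.sequential.delta == 2 the first
// elements written are 1, 3, 5, 7, ...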
void DeviceAllocation::initialize_sequential_host(Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::host::BlockFillSequential<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(host_data.data()),
capacity_,
static_cast<cutlass::float_e4m3_t>(dist.sequential.delta),
static_cast<cutlass::float_e4m3_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::host::BlockFillSequential<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(host_data.data()),
capacity_,
static_cast<cutlass::float_e5m2_t>(dist.sequential.delta),
static_cast<cutlass::float_e5m2_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFillSequential<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(host_data.data()),
capacity_,
static_cast<cutlass::half_t>(dist.sequential.delta),
static_cast<cutlass::half_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFillSequential<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()),
capacity_,
static_cast<cutlass::bfloat16_t>(dist.sequential.delta),
static_cast<cutlass::bfloat16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFillSequential<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()),
capacity_,
static_cast<cutlass::tfloat32_t>(dist.sequential.delta),
static_cast<cutlass::tfloat32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFillSequential<float>(
reinterpret_cast<float *>(host_data.data()),
capacity_,
static_cast<float>(dist.sequential.delta),
static_cast<float>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()),
capacity_,
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.delta)),
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()),
capacity_,
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.delta)),
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()),
capacity_,
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.delta)),
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::host::BlockFillSequential<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(host_data.data()),
capacity_,
cutlass::complex<float>(
static_cast<float>(dist.sequential.delta)),
cutlass::complex<float>(
static_cast<float>(dist.sequential.start))
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFillSequential<double>(
reinterpret_cast<double *>(host_data.data()),
capacity_,
static_cast<double>(dist.sequential.delta),
static_cast<double>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::host::BlockFillSequential<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(host_data.data()),
capacity_,
cutlass::complex<double>(
static_cast<double>(dist.sequential.delta)),
cutlass::complex<double>(
static_cast<double>(dist.sequential.start))
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFillSequential<int2b_t>(
reinterpret_cast<int2b_t *>(host_data.data()),
capacity_,
static_cast<int2b_t>(dist.sequential.delta),
static_cast<int2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFillSequential<int4b_t>(
reinterpret_cast<int4b_t *>(host_data.data()),
capacity_,
static_cast<int4b_t>(dist.sequential.delta),
static_cast<int4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFillSequential<int8_t>(
reinterpret_cast<int8_t *>(host_data.data()),
capacity_,
static_cast<int8_t>(dist.sequential.delta),
static_cast<int8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillSequential<int16_t>(
reinterpret_cast<int16_t *>(host_data.data()),
capacity_,
static_cast<int16_t>(dist.sequential.delta),
static_cast<int16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillSequential<int32_t>(
reinterpret_cast<int32_t *>(host_data.data()),
capacity_,
static_cast<int32_t>(dist.sequential.delta),
static_cast<int32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFillSequential<int64_t>(
reinterpret_cast<int64_t *>(host_data.data()),
capacity_,
static_cast<int64_t>(dist.sequential.delta),
static_cast<int64_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFillSequential<uint1b_t>(
reinterpret_cast<uint1b_t *>(host_data.data()),
capacity_,
static_cast<uint1b_t>(dist.sequential.delta),
static_cast<uint1b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFillSequential<uint2b_t>(
reinterpret_cast<uint2b_t *>(host_data.data()),
capacity_,
static_cast<uint2b_t>(dist.sequential.delta),
static_cast<uint2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFillSequential<uint4b_t>(
reinterpret_cast<uint4b_t *>(host_data.data()),
capacity_,
static_cast<uint4b_t>(dist.sequential.delta),
static_cast<uint4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFillSequential<uint8_t>(
reinterpret_cast<uint8_t *>(host_data.data()),
capacity_,
static_cast<uint8_t>(dist.sequential.delta),
static_cast<uint8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFillSequential<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
static_cast<uint16_t>(dist.sequential.delta),
static_cast<uint16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFillSequential<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
static_cast<uint32_t>(dist.sequential.delta),
static_cast<uint32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFillSequential<uint64_t>(
reinterpret_cast<uint64_t *>(host_data.data()),
capacity_,
static_cast<uint64_t>(dist.sequential.delta),
static_cast<uint64_t>(dist.sequential.start)
);
break;
default: break;
}
copy_from_host(host_data.data());
}
void DeviceAllocation::initialize_random_sparsemeta_device(int seed, int MetaSizeInBits) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
  // Calls to CURAND are instantiated here; this is why this file takes a long time
  // to compile.
switch (type_) {
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
seed,
MetaSizeInBits
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
seed,
MetaSizeInBits
);
break;
default:
break;
}
}
void DeviceAllocation::initialize_random_sparsemeta_host(int seed, int MetaSizeInBits) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
seed,
MetaSizeInBits
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
seed,
MetaSizeInBits
);
break;
default:
break;
}
copy_from_host(host_data.data());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two blocks have exactly the same value
bool DeviceAllocation::block_compare_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity) {
switch (numeric_type) {
case library::NumericTypeID::kFE4M3:
return reference::device::BlockCompareEqual<float_e4m3_t>(
reinterpret_cast<float_e4m3_t const *>(ptr_A),
reinterpret_cast<float_e4m3_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kFE5M2:
return reference::device::BlockCompareEqual<float_e5m2_t>(
reinterpret_cast<float_e5m2_t const *>(ptr_A),
reinterpret_cast<float_e5m2_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kF16:
return reference::device::BlockCompareEqual<half_t>(
reinterpret_cast<half_t const *>(ptr_A),
reinterpret_cast<half_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const *>(ptr_A),
reinterpret_cast<bfloat16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const *>(ptr_A),
reinterpret_cast<tfloat32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kF32:
return reference::device::BlockCompareEqual<float>(
reinterpret_cast<float const *>(ptr_A),
reinterpret_cast<float const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<cutlass::complex<float> >(
reinterpret_cast<complex<float> const *>(ptr_A),
reinterpret_cast<complex<float> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<complex<half_t>>(
reinterpret_cast<complex<half_t> const *>(ptr_A),
reinterpret_cast<complex<half_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCBF16:
return reference::device::BlockCompareEqual<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> const *>(ptr_A),
reinterpret_cast<complex<bfloat16_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCTF32:
return reference::device::BlockCompareEqual<complex<tfloat32_t>>(
reinterpret_cast<complex<tfloat32_t> const *>(ptr_A),
reinterpret_cast<complex<tfloat32_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kF64:
return reference::device::BlockCompareEqual<double>(
reinterpret_cast<double const *>(ptr_A),
reinterpret_cast<double const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<complex<double>>(
reinterpret_cast<complex<double> const *>(ptr_A),
reinterpret_cast<complex<double> const *>(ptr_B),
capacity);
case library::NumericTypeID::kS2:
return reference::device::BlockCompareEqual<int2b_t>(
reinterpret_cast<int2b_t const *>(ptr_A),
reinterpret_cast<int2b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS4:
return reference::device::BlockCompareEqual<int4b_t>(
reinterpret_cast<int4b_t const *>(ptr_A),
reinterpret_cast<int4b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS8:
return reference::device::BlockCompareEqual<int8_t>(
reinterpret_cast<int8_t const *>(ptr_A),
reinterpret_cast<int8_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS16:
return reference::device::BlockCompareEqual<int16_t>(
reinterpret_cast<int16_t const *>(ptr_A),
reinterpret_cast<int16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS32:
return reference::device::BlockCompareEqual<int32_t>(
reinterpret_cast<int32_t const *>(ptr_A),
reinterpret_cast<int32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS64:
return reference::device::BlockCompareEqual<int64_t>(
reinterpret_cast<int64_t const *>(ptr_A),
reinterpret_cast<int64_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kB1:
return reference::device::BlockCompareEqual<uint1b_t>(
reinterpret_cast<uint1b_t const *>(ptr_A),
reinterpret_cast<uint1b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU2:
return reference::device::BlockCompareEqual<uint2b_t>(
reinterpret_cast<uint2b_t const *>(ptr_A),
reinterpret_cast<uint2b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU4:
return reference::device::BlockCompareEqual<uint4b_t>(
reinterpret_cast<uint4b_t const *>(ptr_A),
reinterpret_cast<uint4b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU8:
return reference::device::BlockCompareEqual<uint8_t>(
reinterpret_cast<uint8_t const *>(ptr_A),
reinterpret_cast<uint8_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU16:
return reference::device::BlockCompareEqual<uint16_t>(
reinterpret_cast<uint16_t const *>(ptr_A),
reinterpret_cast<uint16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU32:
return reference::device::BlockCompareEqual<uint32_t>(
reinterpret_cast<uint32_t const *>(ptr_A),
reinterpret_cast<uint32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU64:
return reference::device::BlockCompareEqual<uint64_t>(
reinterpret_cast<uint64_t const *>(ptr_A),
reinterpret_cast<uint64_t const *>(ptr_B),
capacity);
default:
throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(numeric_type));
}
}
/// Returns true if two blocks have approximately the same value
bool DeviceAllocation::block_compare_relatively_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity,
double epsilon,
double nonzero_floor) {
switch (numeric_type) {
case library::NumericTypeID::kFE4M3:
return reference::device::BlockCompareRelativelyEqual<float_e4m3_t>(
reinterpret_cast<float_e4m3_t const *>(ptr_A),
reinterpret_cast<float_e4m3_t const *>(ptr_B),
capacity,
static_cast<float_e4m3_t>(epsilon),
static_cast<float_e4m3_t>(nonzero_floor));
case library::NumericTypeID::kFE5M2:
return reference::device::BlockCompareRelativelyEqual<float_e5m2_t>(
reinterpret_cast<float_e5m2_t const *>(ptr_A),
reinterpret_cast<float_e5m2_t const *>(ptr_B),
capacity,
static_cast<float_e5m2_t>(epsilon),
static_cast<float_e5m2_t>(nonzero_floor));
case library::NumericTypeID::kF16:
return reference::device::BlockCompareRelativelyEqual<half_t>(
reinterpret_cast<half_t const *>(ptr_A),
reinterpret_cast<half_t const *>(ptr_B),
capacity,
static_cast<half_t>(epsilon),
static_cast<half_t>(nonzero_floor));
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareRelativelyEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const *>(ptr_A),
reinterpret_cast<bfloat16_t const *>(ptr_B),
capacity,
static_cast<bfloat16_t>(epsilon),
static_cast<bfloat16_t>(nonzero_floor));
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareRelativelyEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const *>(ptr_A),
reinterpret_cast<tfloat32_t const *>(ptr_B),
capacity,
static_cast<tfloat32_t>(epsilon),
static_cast<tfloat32_t>(nonzero_floor));
case library::NumericTypeID::kF32:
return reference::device::BlockCompareRelativelyEqual<float>(
reinterpret_cast<float const *>(ptr_A),
reinterpret_cast<float const *>(ptr_B),
capacity,
static_cast<float>(epsilon),
static_cast<float>(nonzero_floor));
case library::NumericTypeID::kF64:
return reference::device::BlockCompareRelativelyEqual<double>(
reinterpret_cast<double const *>(ptr_A),
reinterpret_cast<double const *>(ptr_B),
capacity,
static_cast<double>(epsilon),
static_cast<double>(nonzero_floor));
case library::NumericTypeID::kS2:
return reference::device::BlockCompareRelativelyEqual<int2b_t>(
reinterpret_cast<int2b_t const *>(ptr_A),
reinterpret_cast<int2b_t const *>(ptr_B),
capacity,
static_cast<int2b_t>(epsilon),
static_cast<int2b_t>(nonzero_floor));
case library::NumericTypeID::kS4:
return reference::device::BlockCompareRelativelyEqual<int4b_t>(
reinterpret_cast<int4b_t const *>(ptr_A),
reinterpret_cast<int4b_t const *>(ptr_B),
capacity,
static_cast<int4b_t>(epsilon),
static_cast<int4b_t>(nonzero_floor));
case library::NumericTypeID::kS8:
return reference::device::BlockCompareRelativelyEqual<int8_t>(
reinterpret_cast<int8_t const *>(ptr_A),
reinterpret_cast<int8_t const *>(ptr_B),
capacity,
static_cast<int8_t>(epsilon),
static_cast<int8_t>(nonzero_floor));
case library::NumericTypeID::kS16:
return reference::device::BlockCompareRelativelyEqual<int16_t>(
reinterpret_cast<int16_t const *>(ptr_A),
reinterpret_cast<int16_t const *>(ptr_B),
capacity,
static_cast<int16_t>(epsilon),
static_cast<int16_t>(nonzero_floor));
case library::NumericTypeID::kS32:
return reference::device::BlockCompareRelativelyEqual<int32_t>(
reinterpret_cast<int32_t const *>(ptr_A),
reinterpret_cast<int32_t const *>(ptr_B),
capacity,
static_cast<int32_t>(epsilon),
static_cast<int32_t>(nonzero_floor));
case library::NumericTypeID::kS64:
return reference::device::BlockCompareRelativelyEqual<int64_t>(
reinterpret_cast<int64_t const *>(ptr_A),
reinterpret_cast<int64_t const *>(ptr_B),
capacity,
static_cast<int64_t>(epsilon),
static_cast<int64_t>(nonzero_floor));
case library::NumericTypeID::kB1:
return reference::device::BlockCompareRelativelyEqual<uint1b_t>(
reinterpret_cast<uint1b_t const *>(ptr_A),
reinterpret_cast<uint1b_t const *>(ptr_B),
capacity,
static_cast<uint1b_t>(epsilon),
static_cast<uint1b_t>(nonzero_floor));
case library::NumericTypeID::kU2:
return reference::device::BlockCompareRelativelyEqual<uint2b_t>(
reinterpret_cast<uint2b_t const *>(ptr_A),
reinterpret_cast<uint2b_t const *>(ptr_B),
capacity,
static_cast<uint2b_t>(epsilon),
static_cast<uint2b_t>(nonzero_floor));
case library::NumericTypeID::kU4:
return reference::device::BlockCompareRelativelyEqual<uint4b_t>(
reinterpret_cast<uint4b_t const *>(ptr_A),
reinterpret_cast<uint4b_t const *>(ptr_B),
capacity,
static_cast<uint4b_t>(epsilon),
static_cast<uint4b_t>(nonzero_floor));
case library::NumericTypeID::kU8:
return reference::device::BlockCompareRelativelyEqual<uint8_t>(
reinterpret_cast<uint8_t const *>(ptr_A),
reinterpret_cast<uint8_t const *>(ptr_B),
capacity,
static_cast<uint8_t>(epsilon),
static_cast<uint8_t>(nonzero_floor));
case library::NumericTypeID::kU16:
return reference::device::BlockCompareRelativelyEqual<uint16_t>(
reinterpret_cast<uint16_t const *>(ptr_A),
reinterpret_cast<uint16_t const *>(ptr_B),
capacity,
static_cast<uint16_t>(epsilon),
static_cast<uint16_t>(nonzero_floor));
case library::NumericTypeID::kU32:
return reference::device::BlockCompareRelativelyEqual<uint32_t>(
reinterpret_cast<uint32_t const *>(ptr_A),
reinterpret_cast<uint32_t const *>(ptr_B),
capacity,
static_cast<uint32_t>(epsilon),
static_cast<uint32_t>(nonzero_floor));
case library::NumericTypeID::kU64:
return reference::device::BlockCompareRelativelyEqual<uint64_t>(
reinterpret_cast<uint64_t const *>(ptr_A),
reinterpret_cast<uint64_t const *>(ptr_B),
capacity,
static_cast<uint64_t>(epsilon),
static_cast<uint64_t>(nonzero_floor));
    // No relatively-equal comparison is defined for complex numbers, so require
    // bitwise equality as a simplification. This avoids false positives: a reported
    // "pass" is genuinely a pass, while a reported "fail" might still be acceptable
    // under an appropriate epsilon.
    //
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<cutlass::complex<half_t> >(
reinterpret_cast<complex<half_t> const *>(ptr_A),
reinterpret_cast<complex<half_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<cutlass::complex<float> >(
reinterpret_cast<complex<float> const *>(ptr_A),
reinterpret_cast<complex<float> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<cutlass::complex<double> >(
reinterpret_cast<complex<double> const *>(ptr_A),
reinterpret_cast<complex<double> const *>(ptr_B),
capacity);
default:
{
throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(numeric_type));
}
}
}
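// Usage sketch (comment only; 'cutlass_output' and 'reference_output' are
// placeholder DeviceAllocation objects): verification compares two device buffers
// element-wise. The exact tolerance rule lives in the reference comparator
// (cutlass/relatively_equal.h); epsilon scales the permitted relative error and
// nonzero_floor guards against amplifying near-zero reference values.
//
//   bool passed = DeviceAllocation::block_compare_relatively_equal(
//       library::NumericTypeID::kF32,
//       cutlass_output.data(), reference_output.data(),
//       cutlass_output.capacity(),
//       /*epsilon=*/1e-5, /*nonzero_floor=*/1e-6);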
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord, int Rank>
struct vector_to_coord {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[Rank - 1] = vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
coord[Rank - 1] = (int)vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 1> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[0] = vec.at(0);
}
vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
coord[0] = (int)vec.at(0);
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 0> {

  vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
  }

  vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
  }
};
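
// Usage sketch (hypothetical, for illustration only): copying a dynamic extent into
// the static-rank Coord required by a CUTLASS layout. The constructor recurses from
// the highest rank down to rank 1, so the vector must hold at least Rank elements.
//
//   std::vector<int> extent_vec = {16, 32, 8};          // rank-3 extent
//   Coord<3> extent;
//   vector_to_coord<Coord<3>, 3>(extent, extent_vec);   // extent becomes (16, 32, 8)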
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Layout>
static void write_tensor_csv_static_tensor_view(
std::ostream &out,
DeviceAllocation &allocation) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank, typename Layout::Stride::Index> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank, typename Layout::Stride::Index>,
Layout::kStrideRank>(stride, allocation.stride());
Layout layout(stride);
HostTensor<Element, Layout> host_tensor(extent, layout, false);
if (host_tensor.capacity() != allocation.batch_stride()) {
throw std::runtime_error("Unexpected capacity to equal.");
}
host_tensor.copy_in_device_to_host(
static_cast<Element const *>(allocation.data()),
allocation.batch_stride());
TensorViewWrite(out, host_tensor.host_view());
out << "\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static void write_tensor_csv_static_type(
std::ostream &out,
DeviceAllocation &allocation) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
write_tensor_csv_static_tensor_view<T, layout::RowMajor>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajor:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajor>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNHWC>(out, allocation);
break;
case library::LayoutTypeID::kTensorNDHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNDHWC>(out, allocation);
break;
case library::LayoutTypeID::kTensorNC32HW32:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<32>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNC64HW64:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorC32RSK32:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<32>>(out, allocation);
break;
case library::LayoutTypeID::kTensorC64RSK64:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<64>>(out, allocation);
break;
default:
throw std::runtime_error("Unhandled layout");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a tensor to csv
void DeviceAllocation::write_tensor_csv(
std::ostream &out) {
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
write_tensor_csv_static_type<float_e4m3_t>(out, *this);
break;
case library::NumericTypeID::kFE5M2:
write_tensor_csv_static_type<float_e5m2_t>(out, *this);
break;
case library::NumericTypeID::kF16:
write_tensor_csv_static_type<half_t>(out, *this);
break;
case library::NumericTypeID::kBF16:
write_tensor_csv_static_type<bfloat16_t>(out, *this);
break;
case library::NumericTypeID::kTF32:
write_tensor_csv_static_type<tfloat32_t>(out, *this);
break;
case library::NumericTypeID::kF32:
write_tensor_csv_static_type<float>(out, *this);
break;
case library::NumericTypeID::kF64:
write_tensor_csv_static_type<double>(out, *this);
break;
case library::NumericTypeID::kS2:
write_tensor_csv_static_type<int2b_t>(out, *this);
break;
case library::NumericTypeID::kS4:
write_tensor_csv_static_type<int4b_t>(out, *this);
break;
case library::NumericTypeID::kS8:
write_tensor_csv_static_type<int8_t>(out, *this);
break;
case library::NumericTypeID::kS16:
write_tensor_csv_static_type<int16_t>(out, *this);
break;
case library::NumericTypeID::kS32:
write_tensor_csv_static_type<int32_t>(out, *this);
break;
case library::NumericTypeID::kS64:
write_tensor_csv_static_type<int64_t>(out, *this);
break;
case library::NumericTypeID::kB1:
write_tensor_csv_static_type<uint1b_t>(out, *this);
break;
case library::NumericTypeID::kU2:
write_tensor_csv_static_type<uint2b_t>(out, *this);
break;
case library::NumericTypeID::kU4:
write_tensor_csv_static_type<uint4b_t>(out, *this);
break;
case library::NumericTypeID::kU8:
write_tensor_csv_static_type<uint8_t>(out, *this);
break;
case library::NumericTypeID::kU16:
write_tensor_csv_static_type<uint16_t>(out, *this);
break;
case library::NumericTypeID::kU32:
write_tensor_csv_static_type<uint32_t>(out, *this);
break;
case library::NumericTypeID::kU64:
write_tensor_csv_static_type<uint64_t>(out, *this);
break;
case library::NumericTypeID::kCF16:
write_tensor_csv_static_type<cutlass::complex<half_t> >(out, *this);
break;
case library::NumericTypeID::kCF32:
write_tensor_csv_static_type<cutlass::complex<float> >(out, *this);
break;
case library::NumericTypeID::kCF64:
write_tensor_csv_static_type<cutlass::complex<double> >(out, *this);
break;
case library::NumericTypeID::kVoid:
    // Do not dump anything; the element type is void, so the tensor is empty.
break;
default:
throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(this->type()) ) ;
}
}
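
// Usage sketch (hypothetical; the constructor arguments below are illustrative only,
// see DeviceAllocation's declaration for the exact signatures):
//
//   DeviceAllocation allocation(
//     library::NumericTypeID::kF32, library::LayoutTypeID::kRowMajor, {128, 256});
//   ... populate the allocation ...
//   std::ofstream file("tensor_D.csv");
//   allocation.write_tensor_csv(file);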
template <typename Element, typename Layout>
static void tensor_fill_tensor_view(DeviceAllocation &allocation, Element val = Element()) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank, typename Layout::LongIndex> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank, typename Layout::LongIndex>,
Layout::kStrideRank>(stride, allocation.stride());
TensorView<Element, Layout> view(
static_cast<Element *>(allocation.data()),
Layout(stride),
extent
);
cutlass::reference::device::TensorFill<Element, Layout>(
view,
val
);
}
template <typename Element>
static void tensor_fill(DeviceAllocation &allocation, Element val = Element()) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
tensor_fill_tensor_view<Element, layout::RowMajor>(allocation, val);
break;
case library::LayoutTypeID::kColumnMajor:
tensor_fill_tensor_view<Element, layout::ColumnMajor>(allocation, val);
break;
case library::LayoutTypeID::kTensorNHWC:
tensor_fill_tensor_view<Element, layout::TensorNHWC>(allocation, val);
break;
case library::LayoutTypeID::kTensorNDHWC:
tensor_fill_tensor_view<Element, layout::TensorNDHWC>(allocation, val);
break;
case library::LayoutTypeID::kTensorNC32HW32:
tensor_fill_tensor_view<Element, layout::TensorNCxHWx<32>>(allocation, val);
break;
case library::LayoutTypeID::kTensorNC64HW64:
tensor_fill_tensor_view<Element, layout::TensorNCxHWx<64>>(allocation, val);
break;
case library::LayoutTypeID::kTensorC32RSK32:
tensor_fill_tensor_view<Element, layout::TensorCxRSKx<32>>(allocation, val);
break;
case library::LayoutTypeID::kTensorC64RSK64:
tensor_fill_tensor_view<Element, layout::TensorCxRSKx<64>>(allocation, val);
break;
default:
throw std::runtime_error("Unsupported layout");
break;
}
}
/// Fills a tensor uniformly with a value (most frequently used to clear the tensor)
void DeviceAllocation::fill_device(double val = 0.0) {
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
tensor_fill<float_e4m3_t>(*this, static_cast<float_e4m3_t>(val));
break;
case library::NumericTypeID::kFE5M2:
tensor_fill<float_e5m2_t>(*this, static_cast<float_e5m2_t>(val));
break;
case library::NumericTypeID::kF16:
tensor_fill<half_t>(*this, static_cast<half_t>(val));
break;
case library::NumericTypeID::kBF16:
tensor_fill<bfloat16_t>(*this, static_cast<bfloat16_t>(val));
break;
case library::NumericTypeID::kTF32:
tensor_fill<tfloat32_t>(*this, static_cast<tfloat32_t>(val));
break;
case library::NumericTypeID::kF32:
tensor_fill<float>(*this, static_cast<float>(val));
break;
case library::NumericTypeID::kF64:
tensor_fill<double>(*this, static_cast<double>(val));
break;
case library::NumericTypeID::kS2:
tensor_fill<int2b_t>(*this, static_cast<int2b_t>(val));
break;
case library::NumericTypeID::kS4:
tensor_fill<int4b_t>(*this, static_cast<int4b_t>(val));
break;
case library::NumericTypeID::kS8:
tensor_fill<int8_t>(*this, static_cast<int8_t>(val));
break;
case library::NumericTypeID::kS16:
tensor_fill<int16_t>(*this, static_cast<int16_t>(val));
break;
case library::NumericTypeID::kS32:
tensor_fill<int32_t>(*this, static_cast<int32_t>(val));
break;
case library::NumericTypeID::kS64:
tensor_fill<int64_t>(*this, static_cast<int64_t>(val));
break;
case library::NumericTypeID::kB1:
tensor_fill<uint1b_t>(*this, static_cast<uint1b_t>(val));
break;
case library::NumericTypeID::kU2:
tensor_fill<uint2b_t>(*this, static_cast<uint2b_t>(val));
break;
case library::NumericTypeID::kU4:
tensor_fill<uint4b_t>(*this, static_cast<uint4b_t>(val));
break;
case library::NumericTypeID::kU8:
tensor_fill<uint8_t>(*this, static_cast<uint8_t>(val));
break;
case library::NumericTypeID::kU16:
tensor_fill<uint16_t>(*this, static_cast<uint16_t>(val));
break;
case library::NumericTypeID::kU32:
tensor_fill<uint32_t>(*this, static_cast<uint32_t>(val));
break;
case library::NumericTypeID::kU64:
tensor_fill<uint64_t>(*this, static_cast<uint64_t>(val));
break;
case library::NumericTypeID::kCF16:
tensor_fill<cutlass::complex<half_t> >(*this, from_real<half_t>(val));
break;
case library::NumericTypeID::kCF32:
tensor_fill<cutlass::complex<float> >(*this, from_real<float>(val));
break;
case library::NumericTypeID::kCF64:
tensor_fill<cutlass::complex<double> >(*this, from_real<double>(val));
break;
default:
throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(this->type()));
}
}
/// Fills a tensor uniformly with a value (most frequently used to clear the tensor)
void DeviceAllocation::fill_host(double val = 0.0) {
std::vector<uint8_t> host_data(bytes());
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::host::BlockFill<float_e4m3_t>(
reinterpret_cast<float_e4m3_t *>(host_data.data()),
capacity_,
static_cast<float_e4m3_t>(val)
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::host::BlockFill<float_e5m2_t>(
reinterpret_cast<float_e5m2_t *>(host_data.data()),
capacity_,
static_cast<float_e5m2_t>(val)
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFill<half_t>(
reinterpret_cast<half_t *>(host_data.data()),
capacity_,
static_cast<half_t>(val)
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFill<bfloat16_t>(
reinterpret_cast<bfloat16_t *>(host_data.data()),
capacity_,
static_cast<bfloat16_t>(val)
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFill<tfloat32_t>(
reinterpret_cast<tfloat32_t *>(host_data.data()),
capacity_,
static_cast<tfloat32_t>(val)
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFill<float>(
reinterpret_cast<float *>(host_data.data()),
capacity_,
static_cast<float>(val)
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFill<double>(
reinterpret_cast<double *>(host_data.data()),
capacity_,
static_cast<double>(val)
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFill<int2b_t>(
reinterpret_cast<int2b_t *>(host_data.data()),
capacity_,
static_cast<int2b_t>(val)
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFill<int4b_t>(
reinterpret_cast<int4b_t *>(host_data.data()),
capacity_,
static_cast<int4b_t>(val)
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFill<int8_t>(
reinterpret_cast<int8_t *>(host_data.data()),
capacity_,
static_cast<int8_t>(val)
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFill<int16_t>(
reinterpret_cast<int16_t *>(host_data.data()),
capacity_,
static_cast<int16_t>(val)
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFill<int32_t>(
reinterpret_cast<int32_t *>(host_data.data()),
capacity_,
static_cast<int32_t>(val)
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFill<int64_t>(
reinterpret_cast<int64_t *>(host_data.data()),
capacity_,
static_cast<int64_t>(val)
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFill<uint1b_t>(
reinterpret_cast<uint1b_t *>(host_data.data()),
capacity_,
static_cast<uint1b_t>(val)
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFill<uint2b_t>(
reinterpret_cast<uint2b_t *>(host_data.data()),
capacity_,
static_cast<uint2b_t>(val)
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFill<uint4b_t>(
reinterpret_cast<uint4b_t *>(host_data.data()),
capacity_,
static_cast<uint4b_t>(val)
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFill<uint8_t>(
reinterpret_cast<uint8_t *>(host_data.data()),
capacity_,
static_cast<uint8_t>(val)
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFill<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
static_cast<uint16_t>(val)
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFill<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
static_cast<uint32_t>(val)
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFill<uint64_t>(
reinterpret_cast<uint64_t *>(host_data.data()),
capacity_,
static_cast<uint64_t>(val)
);
break;
default:
throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(this->type()));
}
copy_from_host(host_data.data());
}
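
// Usage sketch (hypothetical): clearing or initializing an existing allocation.
// fill_device() launches a device-side TensorFill over the allocation's layout,
// whereas fill_host() fills a host staging buffer of bytes() size and copies it over.
//
//   allocation.fill_device(0.0);   // zero the tensor directly on the device
//   allocation.fill_host(1.0);     // fill with ones via a host round-trip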
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| tools/profiler/src/device_allocation.cu/0 | {
"file_path": "tools/profiler/src/device_allocation.cu",
"repo_id": "tools",
"token_count": 31435
} | 61 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief GETT device reference code
*/
#pragma once
#include <cute/tensor.hpp>

#include "cutlass/numeric_conversion.h"  // cutlass::NumericConverter used below
namespace cutlass::reference::device {
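
// Reference GETT (GEMM-like tensor-tensor contraction) with all modes flattened into
// batched-GEMM coordinates: each thread of gett_kernel computes one output element
//
//   D(m,n,l) = alpha * ( sum_k A(m,k,l) * B(n,k,l) ) + beta * C(m,n,l)
//
// accumulating the inner product in ElementAccumulator and applying the epilogue
// scaling in ElementEpilogue.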
template <
class ATensor,
class BTensor,
class CTensor,
class DTensor,
class ElementAccumulator,
class ElementEpilogue>
__global__ static
void
gett_kernel(
DTensor D,
ATensor const A,
BTensor const B,
CTensor const C,
ElementEpilogue alpha, ElementEpilogue beta,
ElementAccumulator acc_init)
{
using namespace cute;
static_assert(DTensor::rank == 3, "(M,N,L)");
static_assert(ATensor::rank == 3, "(M,K,L)");
static_assert(BTensor::rank == 3, "(N,K,L)");
static_assert(CTensor::rank == 3, "(M,N,L)");
assert(size<0>(A) == size<0>(D)); // M
assert(size<0>(C) == size<0>(D)); // M
assert(size<0>(B) == size<1>(D)); // N
assert(size<1>(C) == size<1>(D)); // N
assert(size<1>(A) == size<1>(B)); // K
assert(size<2>(A) == size<2>(D)); // L
assert(size<2>(B) == size<2>(D)); // L
assert(size<2>(C) == size<2>(D)); // L
NumericConverter<ElementAccumulator, typename ATensor::value_type> a_converter;
NumericConverter<ElementAccumulator, typename BTensor::value_type> b_converter;
NumericConverter<ElementEpilogue, ElementAccumulator> acc_converter;
NumericConverter<ElementEpilogue, typename CTensor::value_type> source_converter;
NumericConverter<typename DTensor::value_type, ElementEpilogue> output_converter;
// Thread id to each element of D
for (int tid = threadIdx.x + blockDim.x * blockIdx.x;
tid < size(D);
tid += blockDim.x * gridDim.x) {
// (m,n,l) coordinate
auto mnl_coord = idx2crd(tid, product_each(shape(D)));
auto m = get<0>(mnl_coord);
auto n = get<1>(mnl_coord);
auto l = get<2>(mnl_coord);
auto A_ml = A(m,_,l);
auto B_nl = B(n,_,l);
    ElementAccumulator accum = acc_init;  // start accumulation from the caller-provided initial value
for (int k = 0; k < size<1>(A); ++k) {
ElementAccumulator a = a_converter(A_ml(k));
ElementAccumulator b = b_converter(B_nl(k));
accum += a * b;
}
ElementEpilogue scaled_output = (alpha * acc_converter(accum)) + (beta * source_converter(C(m,n,l)));
D(m,n,l) = output_converter(scaled_output);
}
}
// Most general version
template <
class ProblemShapeMNKL,
class ElementA,
class StrideA,
class ElementB,
class StrideB,
class ElementAccumulator,
class ElementC,
class StrideC,
class ElementD,
class StrideD,
class ElementEpilogue>
void
gett(
ProblemShapeMNKL problem_shape_mnkl,
ElementA const* ptr_A, StrideA stride_a_mkl,
ElementB const* ptr_B, StrideB stride_b_nkl,
  ElementAccumulator _,                   // dummy value; used only to select the accumulation type
ElementC const* ptr_C, StrideC stride_c_mnl,
ElementD * ptr_D, StrideD stride_d_mnl,
ElementEpilogue alpha, ElementEpilogue beta,
cudaStream_t stream = 0) {
using namespace cute;
static_assert(cute::rank(ProblemShapeMNKL{}) == 4);
auto M = get<0>(problem_shape_mnkl);
auto N = get<1>(problem_shape_mnkl);
auto K = get<2>(problem_shape_mnkl);
auto L = get<3>(problem_shape_mnkl);
// Represent the full tensors
auto A = make_tensor(make_gmem_ptr(ptr_A), make_shape(M,K,L), stride_a_mkl); // (M,K,L)
auto B = make_tensor(make_gmem_ptr(ptr_B), make_shape(N,K,L), stride_b_nkl); // (N,K,L)
auto C = make_tensor(make_gmem_ptr(ptr_C), make_shape(M,N,L), stride_c_mnl); // (M,N,L)
auto D = make_tensor(make_gmem_ptr(ptr_D), make_shape(M,N,L), stride_d_mnl); // (M,N,L)
dim3 dimBlock(256);
dim3 dimGrid(240);
gett_kernel<<< dimGrid, dimBlock, 0, stream >>>(D, A, B, C, alpha, beta, ElementAccumulator(0));
}
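
// Usage sketch (hypothetical): verifying a batched GEMM D = alpha * A * B + beta * C.
// The device pointers d_A, d_B, d_C, d_D are assumed to be allocated and filled by the
// caller (e.g. float buffers); the strides below make M the fastest-varying mode of A,
// C, and D, and N the fastest-varying mode of B.
//
//   int M = 128, N = 256, K = 64, L = 2;
//   auto problem  = cute::make_shape(M, N, K, L);
//   auto stride_A = cute::make_stride(1, M, M * K);   // A is (M,K,L)
//   auto stride_B = cute::make_stride(1, N, N * K);   // B is (N,K,L)
//   auto stride_C = cute::make_stride(1, M, M * N);   // C and D are (M,N,L)
//   cutlass::reference::device::gett(
//     problem,
//     d_A, stride_A,
//     d_B, stride_B,
//     float(0),          // dummy value; selects float accumulation
//     d_C, stride_C,
//     d_D, stride_C,
//     1.0f, 0.0f);       // alpha, beta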
} // namespace cutlass::reference::device
| tools/util/include/cutlass/util/reference/device/gett.hpp/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/gett.hpp",
"repo_id": "tools",
"token_count": 1966
} | 62 |