text (string, 27–947k chars) | id (string, 10–118 chars) | metadata (dict) | __index_level_0__ (int64, 0–80)
---|---|---|---|
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dDgradFilterTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or larger.");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const &params_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
// For a fixed filter position (t, r, s), offset_k_ and offset_c_ hold this thread's offsets in the strided (K) and contiguous (C) dimensions
int filter_t_;
int filter_r_;
int filter_s_;
int offset_k_[ThreadMap::Iterations::kStrided];
int offset_c_[ThreadMap::Iterations::kContiguous];
public:
CUTLASS_HOST_DEVICE
Conv3dDgradFilterTileAccessIteratorAnalytic(
Params const &params,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_t_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
offset_c_[c] = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] =
threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
}
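// Worked example with hypothetical ThreadMap values (illustrative only): if
// threadblock_offset = (0, 0), thread_coord = (contiguous = 8, strided = 2),
// Delta::kContiguous = 64 and Delta::kStrided = 8, this thread covers C offsets
// {8, 72, ...} and K offsets {2, 10, ...}, one entry per iteration.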
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the filter tensor w that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int c = offset_c_[iteration_contiguous_];
int k = offset_k_[iteration_strided_];
return TensorCoord(k, filter_t_, filter_r_, filter_s_, c);
}
/// Returns true if the current coordinate is within the filter tensor w
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K && coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dDgradFilterTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
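// e.g. for half_t (16-bit) elements C must be a multiple of 128 / 16 = 8;
// for float (32-bit) elements, a multiple of 4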
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
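// The following is a minimal host-side sketch, not part of the library: it mirrors
// the (s -> r -> t -> K) traversal order that advance() above implements, using
// plain integers in place of the iterator state. The function name and parameters
// are illustrative assumptions only.
inline int conv3d_dgrad_filter_traversal_sketch(
int T, int R, int S, int tile_rows, int split_k_slices, int num_advances) {
int filter_t = 0, filter_r = 0, filter_s = 0;
int offset_k = 0;
for (int i = 0; i < num_advances; ++i) {
// One pass corresponds to one advance(): bump s first, carry into r, then t;
// only when the whole (t, r, s) footprint wraps does the K offset move forward
// by a full strided tile.
if (++filter_s < S) { continue; }
filter_s = 0;
if (++filter_r < R) { continue; }
filter_r = 0;
if (++filter_t < T) { continue; }
filter_t = 0;
offset_k += tile_rows * split_k_slices;
}
return offset_k;
}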
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 2771
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped Implicit GEMM Convolution kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/conv/threadblock/depthwise_mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Epilogue stores the data into global memory
typename Epilogue_,
/// iterator implementation variants
conv::IteratorAlgorithm IteratorAlgorithm_ = conv::IteratorAlgorithm::kOptimized,
/// Used for partial specialization
typename Enable = bool>
class DepthwiseFpropDirectConvMultipleStage :
public DepthwiseDirectConvMmaBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = DepthwiseDirectConvMmaBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Policy describing tuning details
using Policy = Policy_;
using Epilogue = Epilogue_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static conv::IteratorAlgorithm const kIteratorAlgorithm = IteratorAlgorithm_;
//
// Dependent types
//
/// Fragment of accumulator tile
using ElementC = typename Policy::Operator::ElementC;
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
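// Worked example (hypothetical values): AsyncCopyIterationsPerStageB = 8 and
// Base::kWarpGemmIterations = 3 give kAccessesPerGroupB = (8 + 3 - 1) / 3 = 3,
// i.e. up to three cp.async issues are interleaved with each warp-level MMA step.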
};
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
DepthwiseFpropDirectConvMultipleStage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
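// Worked example (hypothetical warp arrangement): with WarpCount::kM = 2 and
// WarpCount::kN = 2, warp_idx = 5 gives warp_idx_mn = 5 % 4 = 1, warp_idx_k = 1,
// warp_idx_m = 1 % 2 = 1, and warp_idx_n = 1 / 2 = 0.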
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A,
IteratorB &iterator_B,
int group_start_A = 0,
int group_start_B = 0) {
if (kIteratorAlgorithm == conv::IteratorAlgorithm::kFixedStrideDilation) {
// Number of iterations is a compile-time constant.
iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
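// For example (hypothetical configuration): 16-bit elements with
// kElementsPerAccess = 8 and kAccessesPerVector = 1 give
// kSrcBytes = 16 * 8 / 1 / 8 = 16 bytes per cp.async instruction.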
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
} else {
// Number of iterations is a runtime value.
iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < iterator_A.get_iteration_num(); ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA &iterator_A,
///< Params of global memory iterator
typename IteratorA::Params const &iterator_a_params,
///< iterator over B operand in global memory
IteratorB &iterator_B,
///< Params of global memory iterator
typename IteratorB::Params const &iterator_b_params,
///< initial value of accumulator
FragmentC const &src_accum,
/// Epilogue
Epilogue &epilogue,
///< Output operator
typename Epilogue::OutputOp const &output_op,
///< Tile iterator for destination
typename Epilogue::OutputTileIterator &destination_iterator,
///< Tile iterator for the source operand
typename Epilogue::OutputTileIterator &source_iterator,
int split_k_slices = 1
) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) {
if (stage == 0) {
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
}
if (kIteratorAlgorithm == conv::IteratorAlgorithm::kFixedStrideDilation) {
// Number of iterations is a compile-time constant.
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
} else {
// Number of iterations is a runtime value.
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_num(iterator_A.get_iteration_num());
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < iterator_A.get_iteration_num(); ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
}
// Move to the next stage
iterator_A.advance();
this->smem_iterator_A_.add_tile_offset({1, 0});
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
}
/////////////////////////////////////////////////////////////////////////////
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
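// For example, with a hypothetical Base::kStages = 3, cp_async_wait<1> blocks until
// at most one committed cp.async stage is still in flight, so the oldest prologue
// stage is guaranteed to be resident in shared memory after the barrier below.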
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.setup_initial_status(iterator_a_params);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
//
// Mainloop
//
unsigned int iterations = 0;
constexpr int inner_loop_iterations = round_up(Base::kWarpGemmIterations, 2);
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) { // Each iteration is a cta tile.
accum.clear();
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < inner_loop_iterations; ++warp_mma_k) {
if (Base::kWarpGemmIterations % 2 == 0 || warp_mma_k + 1 != Base::kWarpGemmIterations) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Shape::kK);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Shape::kK);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
}
if (warp_mma_k > 0)
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k == 0) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
copy_tiles_and_advance(
iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B);
}
if (warp_mma_k < Base::kWarpGemmIterations) {
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
}
if (warp_mma_k + 1 == inner_loop_iterations)
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
if (warp_mma_k + 2 == inner_loop_iterations) {
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages of cp.async have committed
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next cta
iterator_A.advance();
this->smem_iterator_A_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.advance(- (Base::kStages-1) * iterator_A.get_load_size());
smem_read_stage_idx = 0;
} else {
this->warp_tile_iterator_A_.advance(iterator_A.get_load_size());
++smem_read_stage_idx;
}
if (kIteratorAlgorithm == conv::IteratorAlgorithm::kFixedStrideDilation) {
this->warp_tile_iterator_A_.setup_initial_status(iterator_a_params);
}
// Go back to the start position; operand B is not multistage.
this->warp_tile_iterator_B_.add_tile_offset({-Policy::kPartitionsK * Shape::kK, 0});
--gemm_k_iterations;
}
}
//
// Epilogue
//
int32_t smem_base_offset = iterator_B.get_load_size() + (iterations % Base::kStages) * iterator_A.get_load_size();
destination_iterator.set_tile_index(iterations * split_k_slices);
source_iterator.set_tile_index(iterations * split_k_slices);
epilogue(output_op, destination_iterator, accum, source_iterator, smem_base_offset);
++iterations;
}
// Insert fence and wait for all outstanding cp.async operations to commit.
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/depthwise_fprop_direct_conv_multistage.h/0 | {
"file_path": "include/cutlass/conv/threadblock/depthwise_fprop_direct_conv_multistage.h",
"repo_id": "include",
"token_count": 8631
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Helpers for printing cutlass/core objects
*/
#pragma once
#include <iostream>
#include <typeinfo>
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix.h"
#include "cutlass/quaternion.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm_enumerated_types.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Output operator for CUDA built-in dim3 type
inline std::ostream &operator<<(std::ostream &out, dim3 d) {
return out << d.x << ", " << d.y << ", " << d.z;
}
/// Output operator for CUDA built-in error type
inline std::ostream &operator<<(std::ostream &out, cudaError_t error) {
return out << cudaGetErrorString(error);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
// stream operators for cutlass namespace //
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, int Rank>
inline
std::ostream& operator<<(std::ostream& out, Array<Element, Rank> const& v) {
for (int i = 0; i < Rank; ++i) {
out << (i ? ", " : "") << v[i];
}
return out;
}
template <int Rank>
inline
std::ostream& operator<<(std::ostream& out, Coord<Rank> const& coord) {
for (int i = 0; i < Rank; ++i) {
out << (i ? ", " : "") << coord[i];
}
return out;
}
inline
std::istream & operator>>(std::istream &stream, half_t &x) {
float tmp;
stream >> tmp;
x = static_cast<cutlass::half_t>(tmp);
return stream;
}
inline
std::ostream & operator<<(std::ostream &out, half_t const &x) {
return out << float(x);
}
inline
std::ostream & operator<<(std::ostream &out, bfloat16_t const &x) {
return out << float(x);
}
inline
std::ostream & operator<<(std::ostream &out, tfloat32_t const &x) {
return out << float(x);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to enable formatted printing of CUTLASS scalar types to an ostream
template <typename T>
struct ScalarIO {
/// Value to print
T value;
/// Default ctor
ScalarIO() { }
/// Constructs from a value
ScalarIO(T value): value(value) {}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Default printing to ostream
template <typename T>
inline std::ostream &operator<<(std::ostream &out, ScalarIO<T> const &scalar) {
return out << scalar.value;
}
/// Printing to ostream of int8_t as integer rather than character
template <>
inline std::ostream &operator<<(std::ostream &out, ScalarIO<int8_t> const &scalar) {
return out << int(scalar.value);
}
/// Printing to ostream of uint8_t as integer rather than character
template <>
inline std::ostream &operator<<(std::ostream &out, ScalarIO<uint8_t> const &scalar) {
return out << unsigned(scalar.value);
}
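/// Minimal usage sketch (illustrative only; this helper is not part of the library):
/// streaming an int8_t directly prints a character, while wrapping it in ScalarIO
/// prints the numeric value.
inline void print_scalar_io_example(std::ostream &out, int8_t value) {
out << value; // prints as a character, e.g. 'A' for 65
out << ", ";
out << ScalarIO<int8_t>(value); // prints as an integer, e.g. 65
}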
/// Default printing to ostream for MatrixShape
template <int Row, int Column>
inline
std::ostream & operator<<(std::ostream &out, MatrixShape<Row, Column> const &matrix_shape) {
out << "cutlass::MatrixShape::(kRow, kColumn) {"
<< cutlass::MatrixShape<Row,Column>::kRow <<","
<< cutlass::MatrixShape<Row,Column>::kColumn <<"}";
return out;
}
/// Prints matrix to ostream
template <typename Element, int Rows, int Columns>
std::ostream & operator<<(std::ostream &out, Matrix<Element, Rows, Columns> const &rhs) {
for (int i = 0; i < Rows; ++i) {
for (int j = 0; j < Columns; ++j) {
ScalarIO<Element> element(rhs.at(i, j));
out << (j ? ", " : "") << element;
}
out << "\\n";
}
return out;
}
template <typename T>
std::ostream &operator<<(std::ostream &out, Quaternion<T> const &rhs) {
out << ScalarIO<T>(rhs.w()) << " ";
if (rhs.x() >= 0) {
out << "+";
}
out << ScalarIO<T>(rhs.x()) << "*i ";
if (rhs.y() >= 0) {
out << "+";
}
out << ScalarIO<T>(rhs.y()) << "*j ";
if (rhs.z() >= 0) {
out << "+";
}
out << ScalarIO<T>(rhs.z()) << "*k";
return out;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// stream operators for cutlass::gemm namespace //
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace gemm {
/// Default printing to ostream for GemmShape
template <int M, int N, int K>
inline
std::ostream & operator<<(std::ostream &out, GemmShape<M,N,K> const &gemm_shape) {
out << "cutlass::gemm::GemmShape::(kM, kN, kK) {"
<< cutlass::gemm::GemmShape<M,N,K>::kM <<","
<< cutlass::gemm::GemmShape<M,N,K>::kN <<","
<< cutlass::gemm::GemmShape<M,N,K>::kK << "}";
return out;
}
/// Default printing to ostream for GemmCoord
inline
std::ostream & operator<<(std::ostream &out, GemmCoord const &gemm_coord) {
out << "cutlass::gemm::GemmCoord {"
<< gemm_coord.m() <<","
<< gemm_coord.n() <<","
<< gemm_coord.k() << "}";
return out;
}
} //namespace gemm
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// stream operators for cutlass namespace //
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Default printing to ostream for PitchLinearShape
template < int Contiguous, int Strided>
inline
std::ostream & operator<<(std::ostream &out, PitchLinearShape<Contiguous, Strided> const &pitch_linear_shape) {
out << "cutlass::PitchLinearShape:(kContiguous, kStrided) {"
<< cutlass::layout::PitchLinearShape<Contiguous,Strided>::kContiguous <<","
<< cutlass::layout::PitchLinearShape<Contiguous,Strided>::kStrided <<"}";
return out;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// stream operators for cutlass::conv namespace //
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace conv {
/// Default printing to ostream for Conv2dProblemSize
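/// For example, a hypothetical problem with N = 1, H = W = 224, C = 64, K = 128,
/// R = S = 3, and groups = 1 prints lines of the form "NHWC: (1, 224, 224, 64)"
/// and "KRSC: (128, 3, 3, 64)", followed by the output shape, padding, stride,
/// dilation, split-k, and mode fields.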
inline
std::ostream& operator<<(std::ostream& out, Conv2dProblemSize const& problem) {
out << "NHWC: (" << problem.N << ", " << problem.H << ", " << problem.W << ", " << problem.C << ")" << std::endl
<< "KRSC: (" << problem.K << ", " << problem.R << ", " << problem.S << ", " << problem.C / problem.groups << ")" << std::endl
<< "NPQK: (" << problem.N << ", " << problem.P << ", " << problem.Q << ", " << problem.K << ")" << std::endl
<< "groups: (" << problem.groups << ")" << std::endl
<< "Pad_h, Pad_w: (" << problem.pad_h << ", " << problem.pad_w << ")" << std::endl
<< "Stride_h, Stride_w: (" << problem.stride_h << ", " << problem.stride_w << ")" << std::endl
<< "Dilation_h, Dilation_w: (" << problem.dilation_h << ", " << problem.dilation_w << ")" << std::endl
<< "split_k_slices: (" << problem.split_k_slices << ")" << std::endl
<< "mode: (" << ((problem.mode==conv::Mode::kConvolution) ? "conv" : "xcross") << ")";
return out;
}
/// Default printing to ostream for Conv3dProblemSize
inline
std::ostream& operator<<(std::ostream& out, Conv3dProblemSize const& problem) {
out << "NDHWC: (" << problem.N << ", " << problem.D << ", " << problem.H << ", " << problem.W << ", " << problem.C << ")" << std::endl
<< "KTRSC: (" << problem.K << ", " << problem.T << ", " << problem.R << ", " << problem.S << ", " << problem.C << ")" << std::endl
<< "NZPQK: (" << problem.N << ", " << problem.Z << ", " << problem.P << ", " << problem.Q << ", " << problem.K << ")" << std::endl
<< "pad_d, pad_h, pad_w: (" << problem.pad_d << ", " << problem.pad_h << ", " << problem.pad_w << ")" << std::endl
<< "stride_d, stride_h, stride_w: (" << problem.stride_d << ", " << problem.stride_h << ", " << problem.stride_w << ")" << std::endl
<< "dilation_d, dilation_h, dilation_w: (" << problem.dilation_d << ", " << problem.dilation_h << ", " << problem.dilation_w << ")" << std::endl
<< "split_k_slices: (" << problem.split_k_slices << ") " << std::endl
<< "mode: (" << ((problem.mode==conv::Mode::kConvolution) ? "conv" : "xcross") << ")";
return out;
}
} // namespace conv
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/core_io.h/0 | {
"file_path": "include/cutlass/core_io.h",
"repo_id": "include",
"token_count": 3568
} | 23 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing elementwise operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies an element wise operation to all elements within the fragment
/// and writes it out to destination storage.
///
/// Ways to generalize this:
/// - CTA tile shape
/// - vectorization requirements (GMEM)
/// - vectoriz(able) transform()
///
template <
class StrideC_,
class StrideD_,
class ThreadEpilogueOp_,
class SmemLayout_,
class CopyAtomR2S_,
class TiledCopyS2R_,
class CopyAtomR2G_
>
class Epilogue {
public:
//
// Type Aliases
//
// derived types of output thread level operator
using ThreadEpilogueOp = ThreadEpilogueOp_;
using ElementAccumulator = typename ThreadEpilogueOp::ElementAccumulator;
using ElementCompute = typename ThreadEpilogueOp::ElementCompute;
using ElementScalar = ElementCompute;
using ElementOutput = typename ThreadEpilogueOp::ElementOutput;
using ElementC = typename ThreadEpilogueOp::ElementC;
using StrideC = StrideC_;
using ElementD = typename ThreadEpilogueOp::ElementD;
using StrideD = StrideD_;
using SmemLayout = SmemLayout_;
using CopyAtomR2S = CopyAtomR2S_;
using TiledCopyS2R = TiledCopyS2R_;
using CopyAtomR2G = CopyAtomR2G_;
static const int kOutputAlignment = ThreadEpilogueOp::kCount;
using AlignmentType = typename cute::uint_bit<sizeof_bits<ElementOutput>::value * kOutputAlignment>::type;
static_assert(cute::rank(StrideC{}) == 3, "StrideCD must be rank-3: [M, N, L]");
static_assert(cute::rank(StrideD{}) == 3, "StrideCD must be rank-3: [M, N, L]");
struct SharedStorage
{
cute::array_aligned<ElementAccumulator, cute::cosize_v<SmemLayout>> smem_epilogue;
};
// Host side epilogue arguments
struct Arguments {
typename ThreadEpilogueOp::Params thread{};
ElementC const* ptr_C = nullptr;
StrideC dC{};
ElementD* ptr_D = nullptr;
StrideD dD{};
};
// Device side epilogue params
using Params = Arguments;
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
[[maybe_unused]] ProblemShape const& _,
Arguments const& args,
[[maybe_unused]] void* workspace) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return cutlass::Status::kSuccess;
}
template <class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
[[maybe_unused]] ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
return true;
}
CUTLASS_HOST_DEVICE
Epilogue(Params const& params_)
: params(params_), epilogue_op(params_.thread) { }
CUTLASS_DEVICE
bool
is_source_needed() {
return epilogue_op.is_source_needed();
}
template<
class ProblemShapeMNKL,
class BlockShapeMNK,
class BlockCoordMNKL,
class FrgEngine, class FrgLayout,
class TiledMma,
class ResidueMNK
>
CUTLASS_DEVICE void
operator()(
ProblemShapeMNKL problem_shape_mnkl,
BlockShapeMNK blk_shape_MNK,
BlockCoordMNKL blk_coord_mnkl,
cute::Tensor<FrgEngine,FrgLayout> const& accumulators, // (MMA,MMA_M,MMA_N)
TiledMma tiled_mma,
ResidueMNK residue_mnk,
int thread_idx,
char* smem_buf)
{
using namespace cute;
using X = Underscore;
static_assert(cute::rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4");
static_assert(is_static<BlockShapeMNK>::value, "ThreadBlock tile shape must be static");
static_assert(cute::rank(BlockShapeMNK{}) == 3, "BlockShapeMNK must be rank 3");
static_assert(cute::rank(BlockCoordMNKL{}) == 4, "BlockCoordMNKL must be rank 4");
// synchronizing function for smem reads/writes
#if CUDA_BARRIER_ENABLED
auto synchronize = [] () { cutlass::arch::NamedBarrier::sync(typename TiledCopyS2R::TiledNumThr{}, cutlass::arch::ReservedNamedBarriers::EpilogueBarrier); };
#else
auto synchronize = [] () { __syncthreads(); };
#endif
// Separate out problem shape for convenience
auto M = get<0>(problem_shape_mnkl);
auto N = get<1>(problem_shape_mnkl);
auto L = get<3>(problem_shape_mnkl);
// Represent the full output tensor
Tensor mC_mnl = make_tensor(make_gmem_ptr(params.ptr_C), make_shape(M,N,L), params.dC); // (m,n,l)
Tensor mD_mnl = make_tensor(make_gmem_ptr(params.ptr_D), make_shape(M,N,L), params.dD); // (m,n,l)
Tensor gC_mnl = local_tile(mC_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
Tensor gD_mnl = local_tile(mD_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
// Slice to get the tile this CTA is responsible for
auto [m_coord, n_coord, k_coord, l_coord] = blk_coord_mnkl;
Tensor gC = gC_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N)
Tensor gD = gD_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N)
// Construct a tensor in SMEM that we can partition for rearranging data
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sC = make_tensor(make_smem_ptr(storage.smem_epilogue.data()), SmemLayout{}); // (SMEM_M,SMEM_N)
// Partition sC to match the accumulator partitioning
auto tiled_r2s = make_tiled_copy_C(CopyAtomR2S{}, tiled_mma);
auto tC = tiled_r2s.get_thread_slice(thread_idx);
Tensor tCaC = tC.retile_S(accumulators); // ((Atom,AtomNum), MMA_M, MMA_N)
Tensor tCsC = tC.partition_D(sC); // ((Atom,AtomNum),PIPE_M,PIPE_N)
// Tile gD and gC by the shape of SmemLayout first
auto tile = make_shape(size<0>(sC), size<1>(sC));
Tensor gCt = flat_divide(gC, tile); // (SMEM_M,SMEM_N,TILE_M,TILE_N)
Tensor gDt = flat_divide(gD, tile); // (SMEM_M,SMEM_N,TILE_M,TILE_N)
// Partition sC, gC, and gD for the output
auto tiled_s2r = TiledCopyS2R{};
auto tD = tiled_s2r.get_thread_slice(thread_idx);
Tensor tDsC = tD.partition_S(sC); // ((Atom,AtomNum),ATOM_M,ATOM_N)
Tensor tDgC = tD.partition_D(gCt); // ((Atom,AtomNum),ATOM_M,ATOM_N,TILE_M,TILE_N)
Tensor tDgD = tD.partition_D(gDt); // ((Atom,AtomNum),ATOM_M,ATOM_N,TILE_M,TILE_N)
// Allocate intermediate registers on the dst tensors
Tensor tDrC = make_tensor<ElementAccumulator>(take<0,3>(shape(tDgC))); // ((Atom,AtomNum),ATOM_M,ATOM_N)
Tensor tDrD = make_tensor<ElementOutput>(shape(tDrC)); // ((Atom,AtomNum),ATOM_M,ATOM_N)
// Repeat the D-partitioning for coordinates and predication
Tensor cD = make_identity_tensor(make_shape(size<0>(gD),size<1>(gD))); // (BLK_M,BLK_N) -> (blk_m,blk_n)
Tensor cDt = flat_divide(cD, tile); // (SMEM_M,SMEM_N,TILE_M,TILE_N)
Tensor tDcD = tD.partition_D(cDt); // ((Atom,AtomNum),ATOM_M,ATOM_N,TILE_M,TILE_N)
CUTE_STATIC_ASSERT(size<1>(tCaC) % size<3>(tDgC) == 0); // TILE_M divides MMA_M
CUTE_STATIC_ASSERT(size<2>(tCaC) % size<4>(tDgC) == 0); // TILE_N divides MMA_N
CUTE_STATIC_ASSERT(typename TiledCopyS2R::TiledNumThr{} == size<0>(typename TiledMma::AtomLayoutC_TV{}));
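// For example (hypothetical shapes): a 128x128 threadblock tile with a 64x64
// SmemLayout gives TILE_M = TILE_N = 2, so the step_m / step_n loops below make
// four passes, each staging one 64x64 sub-tile through shared memory.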
#if 0
if (thread_idx == 0 && m_coord == 0 && n_coord == 0) {
print("aC : "); print(accumulators.layout()); print("\n");
print("gC : "); print(gC.layout()); print("\n");
print("gD : "); print(gD.layout()); print("\n");
print("sC : "); print(sC.layout()); print("\n");
print("\n");
print("tCsC : "); print(tCsC.layout()); print("\n");
print("tCaC : "); print(tCaC.layout()); print("\n");
print("\n");
print("gDt : "); print(gDt.layout()); print("\n");
print("tDsC : "); print(tDsC.layout()); print("\n");
print("tDrC : "); print(tDrC.layout()); print("\n");
print("\n");
print("tDrD : "); print(tDrD.layout()); print("\n");
print("tDgC : "); print(tDgC.layout()); print("\n");
print("tDgD : "); print(tDgD.layout()); print("\n");
print("\n");
}
#endif
// For each tiling needed for SmemLayout to cover shape(gD)
CUTLASS_PRAGMA_UNROLL
for (int step_m = 0; step_m < size<2>(cDt); ++step_m)
{
CUTLASS_PRAGMA_UNROLL
for (int step_n = 0; step_n < size<3>(cDt); ++step_n)
{
// Step 1. Copy to SMEM
CUTLASS_PRAGMA_UNROLL
for (int pipe_m = 0; pipe_m < size<1>(tCsC); ++pipe_m) {
CUTLASS_PRAGMA_UNROLL
for (int pipe_n = 0; pipe_n < size<2>(tCsC); ++pipe_n) {
int mma_m = step_m * size<1>(tCsC) + pipe_m;
int mma_n = step_n * size<2>(tCsC) + pipe_n;
copy(tiled_r2s, tCaC(_,mma_m,mma_n), tCsC(_,pipe_m,pipe_n));
}
}
// Step 2. Wait for SMEM writes to complete
synchronize();
// Step 3. Copy from SMEM into a fragment
copy(tiled_s2r, tDsC, tDrC);
// Step 4. Wait for SMEM reads to complete
synchronize();
Tensor tDgDmn = tDgD(_,_,_,step_m,step_n);
Tensor tDcDmn = tDcD(_,_,_,step_m,step_n);
if (epilogue_op.is_source_needed()) {
// source is needed
Tensor tDgCmn = tDgC(_,_,_,step_m,step_n);
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < size<1>(tDgDmn); ++m)
{
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < size<2>(tDgDmn); ++n)
{
// Predication
if (get<0>(tDcDmn(0,m,n)) < get<0>(residue_mnk) &&
get<1>(tDcDmn(0,m,n)) < get<1>(residue_mnk))
{
// Step 5. Elementwise operation with conversion
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size<0>(tDrC); ++i) {
tDrD(i,m,n) = epilogue_op(tDrC(i,m,n), tDgCmn(i,m,n));
}
// Step 6. Copy to GMEM
copy(CopyAtomR2G{}, tDrD(_,m,n), tDgDmn(_,m,n));
}
}
}
}
else {
// source is not needed, avoid load and lift compute
// Step 5. Elementwise operation with conversion
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(tDrC); ++i) {
tDrD(i) = epilogue_op(tDrC(i));
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < size<1>(tDgDmn); ++m)
{
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < size<2>(tDgDmn); ++n)
{
// Predication
if (get<0>(tDcDmn(0,m,n)) < get<0>(residue_mnk) &&
get<1>(tDcDmn(0,m,n)) < get<1>(residue_mnk))
{
// Step 6. Copy to GMEM
copy(CopyAtomR2G{}, tDrD(_,m,n), tDgDmn(_,m,n));
}
}
}
}
}
}
}
private:
Params params;
ThreadEpilogueOp epilogue_op;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace collective
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp/0 | {
"file_path": "include/cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp",
"repo_id": "include",
"token_count": 6352
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations used by epilogues.
*/
#pragma once
#include <cuda_fp16.h>
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element, int ElementsPerAccess>
struct ArrayMaximum {
CUTLASS_HOST_DEVICE
Array<Element, ElementsPerAccess> operator()(
Array<Element, ElementsPerAccess> const &lhs,
Array<Element, ElementsPerAccess> const &rhs) const {
Array<Element, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = platform::max(lhs[i].get(), rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<Element, ElementsPerAccess> operator()(
Array<Element, ElementsPerAccess> const &lhs,
Element rhs) const {
Array<Element, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = platform::max(lhs[i].get(), rhs);
}
return result;
}
};
/// Partial specialization: Element=float
template <int ElementsPerAccess>
struct ArrayMaximum<float, ElementsPerAccess> {
CUTLASS_HOST_DEVICE
Array<float, ElementsPerAccess> operator()(
Array<float, ElementsPerAccess> const &lhs,
Array<float, ElementsPerAccess> const &rhs) const {
Array<float, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = fmax(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<float, ElementsPerAccess> operator()(
Array<float, ElementsPerAccess> const &lhs,
float rhs) const {
Array<float, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = fmax(lhs[i], rhs);
}
return result;
}
};
/// Partial specialization: Element=half
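/// On SM80 and newer the maximum is applied two elements at a time via __hmax2,
/// which is why ElementsPerAccess must be even on that path; older architectures
/// fall back to scalar comparisons.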
template <int ElementsPerAccess>
struct ArrayMaximum<half_t, ElementsPerAccess> {
CUTLASS_DEVICE
Array<half_t, ElementsPerAccess> operator()(
Array<half_t, ElementsPerAccess> const &lhs,
Array<half_t, ElementsPerAccess> const &rhs) const {
Array<half_t, ElementsPerAccess> result;
#if __CUDA_ARCH__ >= 800
int const kVectorCount = ElementsPerAccess / 2;
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(lhs.raw_data());
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(rhs.raw_data());
__half2 *res_ptr = reinterpret_cast<__half2 *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = __hmax2(lhs_ptr[i], rhs_ptr[i]);
}
static_assert(!(ElementsPerAccess % 2), "Output array must be divisible by vector length.");
#else
__half const *lhs_ptr = reinterpret_cast<__half const *>(lhs.raw_data());
__half const *rhs_ptr = reinterpret_cast<__half const *>(rhs.raw_data());
__half *res_ptr = reinterpret_cast<__half *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
res_ptr[i] = ((lhs_ptr[i] < rhs_ptr[i]) ? rhs_ptr[i] : lhs_ptr[i]);
}
#endif
return result;
}
CUTLASS_DEVICE
Array<half_t, ElementsPerAccess> operator()(
Array<half_t, ElementsPerAccess> const &lhs,
half_t const &rhs) const {
Array<half_t, ElementsPerAccess> result;
#if __CUDA_ARCH__ >= 800
int const kVectorCount = ElementsPerAccess / 2;
__half rhs_raw = reinterpret_cast<__half const &>(rhs);
__half2 rhs_pair = __half2half2(rhs_raw);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(lhs.raw_data());
__half2 *res_ptr = reinterpret_cast<__half2 *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = __hmax2(lhs_ptr[i], rhs_pair);
}
static_assert(!(ElementsPerAccess % 2), "Output array must be divisible by vector length.");
#else
__half const *lhs_ptr = reinterpret_cast<__half const *>(lhs.raw_data());
__half const rhs_raw = reinterpret_cast<__half const &>(rhs);
__half *res_ptr = reinterpret_cast<__half *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
res_ptr[i] = ((lhs_ptr[i] < rhs_raw) ? rhs_raw : lhs_ptr[i]);
}
#endif
return result;
}
};
/// Partial specialization: Element=bfloat16_t
template <int ElementsPerAccess>
struct ArrayMaximum<bfloat16_t, ElementsPerAccess> {
using NvType = __nv_bfloat16;
using NvTypeV2 = __nv_bfloat162;
CUTLASS_DEVICE
Array<bfloat16_t, ElementsPerAccess> operator()(
Array<bfloat16_t, ElementsPerAccess> const &lhs,
Array<bfloat16_t, ElementsPerAccess> const &rhs) const {
Array<bfloat16_t, ElementsPerAccess> result;
#if __CUDA_ARCH__ >= 800
int const kVectorCount = ElementsPerAccess / 2;
NvTypeV2 const *lhs_ptr = reinterpret_cast<NvTypeV2 const *>(lhs.raw_data());
NvTypeV2 const *rhs_ptr = reinterpret_cast<NvTypeV2 const *>(rhs.raw_data());
NvTypeV2 *res_ptr = reinterpret_cast<NvTypeV2 *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = __hmax2(lhs_ptr[i], rhs_ptr[i]);
}
#else
NvType const *lhs_ptr = reinterpret_cast<NvType const *>(lhs.raw_data());
NvType const *rhs_ptr = reinterpret_cast<NvType const *>(rhs.raw_data());
NvType *res_ptr = reinterpret_cast<NvType *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
res_ptr[i] = ((lhs_ptr[i] < rhs_ptr[i]) ? rhs_ptr[i] : lhs_ptr[i]);
}
#endif
return result;
}
CUTLASS_DEVICE
Array<bfloat16_t, ElementsPerAccess> operator()(
Array<bfloat16_t, ElementsPerAccess> const &lhs,
bfloat16_t rhs) const {
Array<bfloat16_t, ElementsPerAccess> result;
#if __CUDA_ARCH__ >= 800
int const kVectorCount = ElementsPerAccess / 2;
NvType rhs_raw = reinterpret_cast<NvType const &>(rhs);
NvTypeV2 rhs_pair = __bfloat162bfloat162(rhs_raw);
NvTypeV2 const *lhs_ptr = reinterpret_cast<NvTypeV2 const *>(lhs.raw_data());
NvTypeV2 *res_ptr = reinterpret_cast<NvTypeV2 *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = __hmax2(lhs_ptr[i], rhs_pair);
}
static_assert(!(ElementsPerAccess % 2), "Output array must be divisible by vector length.");
#else
NvType const *lhs_ptr = reinterpret_cast<NvType const *>(lhs.raw_data());
NvType const rhs_raw = reinterpret_cast<NvType const &>(rhs);
NvType *res_ptr = reinterpret_cast<NvType *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
res_ptr[i] = ((lhs_ptr[i] < rhs_raw) ? rhs_raw : lhs_ptr[i]);
}
#endif
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, int ElementsPerAccess>
struct ReluConditional {
CUTLASS_HOST_DEVICE
void operator()(
bool conditional[],
Array<Element, ElementsPerAccess> const &fragment,
Element threshold) const {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
conditional[i] = !(fragment[i] < threshold);
}
}
};
template <int ElementsPerAccess>
struct ReluConditional<half_t, ElementsPerAccess> {
CUTLASS_DEVICE
void operator()(
bool conditional[],
Array<half_t, ElementsPerAccess> const &fragment,
half_t threshold) const {
__half y = reinterpret_cast<__half const &>(threshold);
__half const *x = reinterpret_cast<__half const *>(fragment.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
conditional[i] = !__hlt(x[i], y);
}
}
};
template <int ElementsPerAccess>
struct ReluConditional<bfloat16_t, ElementsPerAccess> {
CUTLASS_DEVICE
void operator()(
bool conditional[],
Array<bfloat16_t, ElementsPerAccess> const &fragment,
bfloat16_t threshold) const {
__nv_bfloat16 y = reinterpret_cast<__nv_bfloat16 const &>(threshold);
__nv_bfloat16 const *x = reinterpret_cast<__nv_bfloat16 const *>(fragment.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
conditional[i] = !__hlt(x[i], y);
}
}
};
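//
// Illustrative usage of the helpers above (a minimal sketch; the fragment width and threshold
// value are hypothetical, not taken from this file). The enclosing epilogue functor calls these
// once per output fragment:
//
//   Array<half_t, 8> frag;                             // fragment produced by the output operator
//   half_t threshold = half_t(0);                      // ReLU threshold
//   bool conditions[8];
//
//   detail::ReluConditional<half_t, 8> relu_conditional;
//   relu_conditional(conditions, frag, threshold);     // conditions[i] = !(frag[i] < threshold)
//
//   detail::ArrayMaximum<half_t, 8> maximum_op;
//   frag = maximum_op(frag, threshold);                // element-wise max against the threshold
//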
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This is a partial specialization for fused Bias and ReLU. It supports the option of packing
/// ReLU conditionals in a bit vector that may be used by backwards passes as an optimization.
///
/// This class can only be used with cutlass::epilogue::threadblock::EpilogueWithBroadcast<>.
///
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <
typename ElementC_,
typename ElementAccumulator_,
typename ElementCompute_,
typename ElementZ_,
int ElementsPerAccess,
bool StoreT_ = true,
typename ElementVector_ = ElementC_
>
class LinearCombinationBiasRelu {
public:
using ElementOutput = ElementC_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementZ = ElementZ_;
using ElementVector = ElementVector_;
using ElementT = uint1b_t;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kCount = kElementsPerAccess;
using ElementwiseOp = ReLu<ElementCompute>;
using BinaryOp = plus<ElementCompute>;
// Indicates that this epilogue applies only one binary operation
static bool const kIsSingleSource = true;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentC = Array<ElementOutput, kElementsPerAccess>;
using FragmentZ = Array<ElementZ, kElementsPerAccess>;
using FragmentT = Array<ElementT, kElementsPerAccess>;
/// If true, the 'Z' tensor is stored
static bool const kStoreZ = true;
/// If true, the 'T' tensor is stored
static bool const kStoreT = StoreT_;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
ElementZ threshold; ///< ReLu threshold
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute()),
alpha_ptr(nullptr),
beta_ptr(nullptr),
threshold(ElementCompute()) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta,
ElementCompute threshold_ = ElementCompute()
):
alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
NumericConverter<ElementZ, ElementCompute> convert_threshold;
threshold = convert_threshold(threshold_);
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr), threshold(ElementZ()) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr,
ElementCompute threshold_ = ElementCompute()
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
NumericConverter<ElementZ, ElementCompute> convert_threshold;
threshold = convert_threshold(threshold_);
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr), threshold(ElementZ()) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementZ threshold_;
public:
//
// Methods
//
/// Constructor from Params
CUTLASS_HOST_DEVICE
LinearCombinationBiasRelu(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
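      // Serial split-K: partitions after the first accumulate onto the previous partition's
      // partial result, so the source term is added with unit scaling.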
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// set to NaN to make ReLU no-op for all except last k partitions
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementZ const &>(allones);
}
}
/// Applies the operation when is_source_needed() is true
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentC const &frag_C,
FragmentCompute const &V) const {
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute tmp_C = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C);
FragmentCompute result_Z;
bool conditions[kElementsPerAccess];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = alpha_ * tmp_Accum[i];
z += beta_ * tmp_C[i];
z = binary_op(z, V[i]);
result_Z[i] = z;
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
//
// Compute condition
//
detail::ReluConditional<ElementZ, kElementsPerAccess> relu_conditional;
relu_conditional(conditions, frag_Z, threshold_);
detail::ArrayMaximum<ElementZ, kElementsPerAccess> maximum_op;
frag_Z = maximum_op(frag_Z, threshold_);
if (kStoreT) {
PackPredicates<kElementsPerAccess> pack_predicates;
frag_T = pack_predicates(conditions);
}
}
/// Applies the operation when is_source_needed() is false
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentCompute const &V) const {
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute result_Z;
bool conditions[kElementsPerAccess];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = binary_op(alpha_ * tmp_Accum[i], V[i]);
result_Z[i] = z;
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
//
// Compute condition
//
detail::ReluConditional<ElementZ, kElementsPerAccess> relu_conditional;
relu_conditional(conditions, frag_Z, threshold_);
detail::ArrayMaximum<ElementZ, kElementsPerAccess> maximum_op;
frag_Z = maximum_op(frag_Z, threshold_);
    //
    // Store
    //
if (kStoreT) {
PackPredicates<kElementsPerAccess> pack_predicates;
frag_T = pack_predicates(conditions);
}
}
};
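//
// Illustrative instantiation (a minimal sketch; the element types and access width below are
// hypothetical and must match the epilogue's output tile iterator in a real kernel):
//
//   using OutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu<
//     cutlass::half_t,     // ElementC
//     float,               // ElementAccumulator
//     float,               // ElementCompute
//     cutlass::half_t,     // ElementZ
//     8>;                  // ElementsPerAccess
//
//   OutputOp::Params params(/*alpha=*/1.0f, /*beta=*/0.0f);
//   OutputOp output_op(params);   // invoked per output vector by EpilogueWithBroadcast
//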
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_bias_relu.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_bias_relu.h",
"repo_id": "include",
"token_count": 6788
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operation, bias addition, and tensor-tensor
elementwise operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/detail.hpp"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
namespace detail {
/// Returns whether a source operand is needed for a combination of binary operation and scale
/// type. Simple specialized checks are made for cases in which 0 is an identity element of
/// the binary operation.
template <class BinaryOp, class ElementCompute, ScaleType::Kind Scale>
CUTLASS_HOST_DEVICE
bool is_binary_op_source_needed(ElementCompute scale) {
if constexpr (cute::is_same_v<BinaryOp, NoOp<ElementCompute>>) {
return false;
}
else if constexpr (cute::is_same_v<BinaryOp, plus<ElementCompute>> || cute::is_same_v<BinaryOp, minus<ElementCompute>>) {
// Cases for binary operators for which 0 is an identity element
if constexpr (Scale == ScaleType::NoBetaScaling) return true;
if constexpr (Scale == ScaleType::OnlyAlphaScaling) return false;
if constexpr (Scale == ScaleType::Nothing) return false;
return scale != ElementCompute(0);
}
return true;
}
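// Example of the shortcut above (a sketch): with BinaryOp = plus<float> and
// Scale = ScaleType::Default, a runtime scale of zero means the source operand contributes
// nothing, so the epilogue may skip loading it entirely:
//
//   bool needed = is_binary_op_source_needed<plus<float>, float, ScaleType::Default>(0.0f);
//   // needed == false
//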
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/** Compute a tensor-tensor broadcast epilogue.
*
* @param ElementOutput_ Data type used to load and store tensors
* @param ElementAccumulator_ Accumulator data type
* @param ElementCompute_ Data type used to compute linear combination
* @param ElementBias_ Data type of Bias elements
* @param ActivationFunctor_ Fused Activation
* @param BinaryOp0_ Binary operation to perform on O0 and C0. detail::NoOp means no operation
* @param BinaryOp1_ Binary operation to perform on O1 and C1. detail::NoOp means no operation
* @param UnaryOp_ Unary operation to perform on final result
* @param Scale Controls the type of Alpha and Beta scaling to perform
* @param Round How values should be rounded in conversions
* @param ElementSource_ Data type used for source operands
*
* Computes the following:
* O0 = alpha * accumulator + bias
* O1 = BinaryOp0(O0, beta * C0)
* O2 = BinaryOp1(O1, beta * C1)
* D = UnaryOp(O2)
*/
template <
class ElementOutput_,
class ElementAccumulator_ = ElementOutput_,
class ElementCompute_ = ElementOutput_,
class ElementBias_ = ElementCompute_,
template <class T> class ActivationFunctor_ = Identity,
template <class T> class BinaryOp0_ = plus,
template <class T> class BinaryOp1_ = detail::NoOp,
template <class T> class UnaryOp_ = Identity,
ScaleType::Kind Scale = ScaleType::Default,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
class ElementSource_ = ElementOutput_
>
class LinearCombinationTensorBroadcast {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementScalar = ElementCompute;
using ElementBias = ElementBias_;
using ElementC = ElementSource_;
using ElementD = ElementOutput_;
using ElementScalingFactor = ElementAccumulator_;
using UnaryOp = UnaryOp_<ElementCompute>;
using BinaryOp0 = BinaryOp0_<ElementCompute>;
using BinaryOp1 = BinaryOp1_<ElementCompute>;
using ActivationFunctor = ActivationFunctor_<ElementCompute>;
static constexpr int kCount = 1;
static constexpr ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
using FragmentBias = Array<ElementBias, kCount>;
static constexpr FloatRoundStyle kRound = Round;
using NoOpType = detail::NoOp<ElementCompute>;
static constexpr bool IsBinaryOp0Enabled = !cute::is_same_v<BinaryOp0, NoOpType>;
static constexpr bool IsBinaryOp1Enabled = !cute::is_same_v<BinaryOp1, NoOpType>;
static constexpr bool IsUnaryOpEnabled = !cute::is_same_v<UnaryOp, NoOpType> && !cute::is_same_v<UnaryOp, Identity<ElementCompute>>;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha{}; ///< scales accumulators
ElementCompute beta{}; ///< scales source tensor
ElementCompute const* alpha_ptr = nullptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const* beta_ptr = nullptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(ElementCompute const* alpha_ptr, ElementCompute const* beta_ptr)
: alpha_ptr(alpha_ptr),
beta_ptr(beta_ptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute const* alpha_ptr)
: alpha_ptr(alpha_ptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute alpha,
ElementCompute beta)
: alpha(alpha),
beta(beta) {}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationTensorBroadcast(Params const& params)
: alpha_(params.alpha_ptr ? *params.alpha_ptr : params.alpha),
beta_(params.beta_ptr ? *params.beta_ptr : params.beta) {}
/// Returns true if source 0 is needed
CUTLASS_HOST_DEVICE
bool is_source0_needed() const {
return detail::is_binary_op_source_needed<BinaryOp0, ElementCompute, Scale>(beta_);
}
/// Returns true if source 1 is needed
CUTLASS_HOST_DEVICE
bool is_source1_needed() const {
return detail::is_binary_op_source_needed<BinaryOp1, ElementCompute, Scale>(beta_);
}
//
// Specialization for scalar
//
CUTLASS_HOST_DEVICE
ElementD operator()(ElementAccumulator const accumulator, ElementC const source0, ElementC source1, ElementBias const bias) {
// Convert everything to Compute type, do compute, and then store to output type
NumericConverter<ElementCompute, ElementAccumulator, Round> accumulator_converter;
NumericConverter<ElementCompute, ElementBias, Round> bias_converter;
NumericConverter<ElementCompute, ElementC, Round> source_converter;
NumericConverter<ElementD, ElementCompute, Round> destination_converter;
ActivationFunctor act;
multiplies<ElementCompute> mul;
multiply_add<ElementCompute> madd;
ElementCompute intermediate = accumulator_converter(accumulator);
intermediate = madd(alpha_, intermediate, bias_converter(bias));
intermediate = act(intermediate);
// Apply BinaryOp0, if needed
if constexpr (IsBinaryOp0Enabled) {
BinaryOp0 bin0;
ElementCompute converted_source = source_converter(source0);
intermediate = bin0(intermediate, mul(beta_, converted_source));
}
// Apply BinaryOp1, if needed
if constexpr (IsBinaryOp1Enabled) {
BinaryOp1 bin1;
ElementCompute converted_source = source_converter(source1);
intermediate = bin1(intermediate, mul(beta_, converted_source));
}
// Apply UnaryOp, if needed
if constexpr (IsUnaryOpEnabled) {
UnaryOp unary;
intermediate = unary(intermediate);
}
return destination_converter(intermediate);
}
};
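//
// Illustrative instantiation (a minimal sketch; the types and operators below are hypothetical):
//
//   using OutputOp = cutlass::epilogue::thread::LinearCombinationTensorBroadcast<
//     cutlass::half_t,                            // ElementOutput
//     float,                                      // ElementAccumulator
//     float,                                      // ElementCompute
//     float,                                      // ElementBias
//     cutlass::epilogue::thread::ReLu,            // ActivationFunctor
//     cutlass::plus,                              // BinaryOp0
//     cutlass::epilogue::thread::detail::NoOp>;   // BinaryOp1 (disabled)
//
//   OutputOp output_op(OutputOp::Params{/*alpha=*/1.0f, /*beta=*/1.0f});
//   // d = output_op(accum, c0, c1, bias);   // c1 is ignored because BinaryOp1 is NoOp
//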
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_tensor_broadcast.hpp/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_tensor_broadcast.hpp",
"repo_id": "include",
"token_count": 3027
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#include <cuda/std/utility>
#else
#include <assert.h>
#include <utility>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <
typename ElementC_,
typename ElementAccumulator_,
typename ElementCompute_,
typename ElementZ_,
typename ElementT_,
int ElementsPerAccess,
bool StoreZ = true,
bool StoreT = true
>
struct EpilogueWithBroadcastOpBase {
using ElementOutput = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementZ = ElementZ_;
using ElementT = ElementT_;
static int const kElementsPerAccess = ElementsPerAccess;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentC = Array<ElementOutput, kElementsPerAccess>;
using FragmentZ = Array<ElementZ, kElementsPerAccess>;
using FragmentT = Array<ElementT, kElementsPerAccess>;
/// If true, the 'Z' tensor is stored
static bool const kStoreZ = StoreZ;
/// If true, the 'T' tensor is stored
static bool const kStoreT = StoreT;
/// Parameters structure - required
struct Params { };
//
// Methods
//
/// Constructor from Params
EpilogueWithBroadcastOpBase(Params const ¶ms_) { }
  /// Determine if the source is needed. May return false to allow the epilogue to skip loading the source tensor.
bool is_source_needed() const {
return true;
}
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) { }
/// Applies the operation when is_source_needed() is true
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentC const &frag_C1,
FragmentC const &frag_C2,
FragmentCompute const &V) const {
}
/// Applies the operation when is_source_needed() is false
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentCompute const &V) const {
}
};
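//
// A conforming OutputOp (for example, cutlass::epilogue::thread::LinearCombinationBiasRelu)
// must supply the fragment typedefs, the kStoreZ / kStoreT flags, a Params structure,
// is_source_needed(), set_k_partition(), and the operator() overloads sketched above;
// EpilogueWithBroadcast invokes them once per vector of the output tile.
//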
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator with bias vector broadcast over columns.
///
/// Computes the following:
///
///     Z, T = OutputOp(AB, C, Broadcast)
///
///     if (OutputOp::kStoreZ) {
///       store(Z);
///     }
///
///     if (OutputOp::kStoreT) {
///       store(T);
///     }
///
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors (z)
typename TensorTileIterator_, ///< Additional tile iterator for tensor-valued operands (t)
typename ElementVector_, ///< Pointer to broadcast vector
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator - concept is EpilogueWithBroadcastOp
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
  int FragmentsPerPartition = 1,          ///< Used to coarsen the epilogue granularity
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(!IsEpilogueFunctorHeavy<OutputOp_>::value),
bool IsSingleSource = OutputOp_::kIsSingleSource
>
class EpilogueWithBroadcast;
template <
typename Shape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputTileIterator_,
typename TensorTileIterator_,
typename ElementVector_,
typename AccumulatorFragmentIterator_,
typename WarpTileIterator_,
typename SharedLoadIterator_,
typename OutputOp_,
typename Padding_,
int FragmentsPerPartition,
int IterationsUnroll
>
class EpilogueWithBroadcast<
Shape_,
WarpMmaOperator_,
PartitionsK,
OutputTileIterator_,
TensorTileIterator_,
ElementVector_,
AccumulatorFragmentIterator_,
WarpTileIterator_,
SharedLoadIterator_,
OutputOp_,
Padding_,
FragmentsPerPartition,
IterationsUnroll,
false
> :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
static bool const kIsSingleSource = false;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using TensorTileIterator = TensorTileIterator_;
using ElementVector = ElementVector_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Compute data type produced by the output op
using ElementCompute = typename OutputOp::ElementCompute;
/// Compute fragment
using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;
/// Thread map used by output tile iterators
using ThreadMap = typename OutputTileIterator::ThreadMap;
/// Fragment object used to store the broadcast values
using BroadcastFragment = Array<
ElementCompute,
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Data type of additional tensor
using ElementTensor = typename TensorTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;
/// Tensor access type
using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
/// Shared memory allocation from epilogue base class
using BaseSharedStorage = typename Base::SharedStorage;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
/// Used for the broadcast
struct BroadcastDetail {
/// Number of threads per warp
static int const kWarpSize = 32;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Number of distinct scalar column indices handled by each thread
static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;
/// Number of distinct scalar row indices handled by each thread
static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;
/// Number of threads per threadblock
static int const kThreadCount = kWarpSize * WarpCount::kCount;
/// Number of distinct threads per row of output tile
static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);
/// Number of distinct threads which must be reduced during the final reduction phase within the threadblock.
static int const kThreadRows = kThreadCount / kThreadsPerRow;
    /// Number of accesses each thread makes to cover one row of the output tile (ceil(Shape::kN / kThreadCount))
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
kThreadRows,
Shape::kN
>;
/// Debug printing
CUTLASS_DEVICE
static void print() {
#if 0
printf("BroadcastDetail {\n");
printf(
" kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
"kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
kColumnsPerThread,
kRowsPerThread,
kThreadCount,
kThreadsPerRow,
kThreadRows,
kThreadAccessesPerRow,
StorageShape::kRow,
StorageShape::kColumn,
StorageShape::kCount
);
printf("};\n");
#endif
}
};
  /// Shared storage structure (shadows the base class's shared memory allocation)
struct SharedStorage {
union {
BaseSharedStorage base;
};
CUTLASS_HOST_DEVICE
SharedStorage() { }
};
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Thread index within the threadblock
int thread_idx_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithBroadcast(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.base.reference(), thread_idx),
thread_idx_(thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix
OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord()) {
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
if (!output_op.is_source_needed()) {
compute_source_not_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
tensor_iterator);
}
else {
compute_source_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
source_iterator1,
source_iterator2,
tensor_iterator);
}
}
private:
CUTLASS_DEVICE
void load_broadcast_fragment_(
BroadcastFragment & broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
ElementVector const * broadcast_ptr, ///< Broadcast vector
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
broadcast_fragment.clear();
// If no pointer is supplied, set with all zeros and avoid memory accesses
if (!broadcast_ptr) {
return;
}
int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column();
int thread_column_idx = threadblock_offset.column() + thread_initial_column;
broadcast_ptr += thread_initial_column;
NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess> converter;
using AccessType = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>;
using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>;
ComputeFragmentType *frag_ptr = reinterpret_cast<ComputeFragmentType *>(&broadcast_fragment);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) {
AccessType loaded;
loaded.clear();
if (thread_column_idx < problem_size.column()) {
loaded = *reinterpret_cast<AccessType const *>(broadcast_ptr);
}
ComputeFragmentType cvt = converter(loaded);
frag_ptr[j] = cvt;
thread_column_idx += ThreadMap::Delta::kColumn;
broadcast_ptr += ThreadMap::Delta::kColumn;
}
}
template <class Seq>
struct acc2smem_source_not_needed;
template <size_t... Seq>
struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
template <int Advance>
CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
warp_tile_iterator.store(accum_fragment);
if (p < Base::kFragmentsPerIteration - 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
}
}
if (Base::kFragmentsPerIteration > 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
(1 - Base::kFragmentsPerIteration));
}
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {
(pos == (Seq * Base::kFragmentsPerIteration)) &&
(helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    TensorTileIterator tensor_iterator          ///< Threadblock tile iterator for additional tensor operand
) {
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
// CUTLASS_PRAGMA_UNROLL
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) {
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_not_needed<
cutlass::make_index_sequence<OutputTileIterator::kIterations /
Base::kFragmentsPerIteration>>::push(iter,
accum_fragment_iterator,
this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
if (p < Base::kFragmentsPerIteration - 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
}
else if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_source_not_needed_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
if (Base::kFragmentsPerIteration > 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
}
}
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
      int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
      CUTLASS_UNUSED(dummy[0]);
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix
OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix
    TensorTileIterator tensor_iterator          ///< Threadblock tile iterator for additional tensor operand
) {
typename OutputTileIterator::Fragment source_fragment1;
source_fragment1.clear();
typename OutputTileIterator::Fragment source_fragment2;
source_fragment2.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator1.load(source_fragment1);
++source_iterator1;
source_iterator2.load(source_fragment2);
++source_iterator2;
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
source_fragment1,
source_fragment2,
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
typename OutputTileIterator::Fragment const &frag_C1,
typename OutputTileIterator::Fragment const &frag_C2,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
OutputAccessType const *frag_C1_ptr =
reinterpret_cast<OutputAccessType const *>(&frag_C1);
OutputAccessType const *frag_C2_ptr =
reinterpret_cast<OutputAccessType const *>(&frag_C2);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_C1_ptr[i],
frag_C2_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
public:
/// Stream-K reduce helper
CUTLASS_DEVICE
void reduce(
int reduce_fragment_idx, ///< Reduce fragment index
OutputOp const &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix
OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord())
{
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
// Initialize/load source-fragment data
typename OutputTileIterator::Fragment source_fragment1;
source_fragment1.clear();
typename OutputTileIterator::Fragment source_fragment2;
source_fragment2.clear();
if (output_op.is_source_needed())
{
source_iterator1 += reduce_fragment_idx;
source_iterator1.load(source_fragment1);
source_iterator2 += reduce_fragment_idx;
source_iterator2.load(source_fragment2);
}
// Load fragment from shared memory
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// Add fragments shared by other k partitions
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
if (!output_op.is_source_needed()) {
apply_output_operator_source_not_needed_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
} else {
apply_output_operator_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
source_fragment1,
source_fragment2,
broadcast_fragment);
}
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator += reduce_fragment_idx;
destination_iterator.store(frag_Z);
}
if (OutputOp::kStoreT) {
tensor_iterator += reduce_fragment_idx;
tensor_iterator.store(frag_T);
}
}
};
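/////////////////////////////////////////////////////////////////////////////////////////////////
// Partial specialization for a single source operand (OutputOp::kIsSingleSource == true).
// The control flow mirrors the two-source specialization above: load the broadcast vector,
// stage accumulators through shared memory, reduce across k-partitions if needed, apply the
// output operator per vector, and conditionally store the Z and T fragments.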
template <
typename Shape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputTileIterator_,
typename TensorTileIterator_,
typename ElementVector_,
typename AccumulatorFragmentIterator_,
typename WarpTileIterator_,
typename SharedLoadIterator_,
typename OutputOp_,
typename Padding_,
int FragmentsPerPartition,
int IterationsUnroll
>
class EpilogueWithBroadcast<
Shape_,
WarpMmaOperator_,
PartitionsK,
OutputTileIterator_,
TensorTileIterator_,
ElementVector_,
AccumulatorFragmentIterator_,
WarpTileIterator_,
SharedLoadIterator_,
OutputOp_,
Padding_,
FragmentsPerPartition,
IterationsUnroll,
true
> :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
static bool const kIsSingleSource = true;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using TensorTileIterator = TensorTileIterator_;
using ElementVector = ElementVector_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Compute data type produced by the output op
using ElementCompute = typename OutputOp::ElementCompute;
/// Compute fragment
using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;
/// Thread map used by output tile iterators
using ThreadMap = typename OutputTileIterator::ThreadMap;
/// Fragment object used to store the broadcast values
using BroadcastFragment = Array<
ElementCompute,
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Data type of additional tensor
using ElementTensor = typename TensorTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;
/// Tensor access type
using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
/// Shared memory allocation from epilogue base class
using BaseSharedStorage = typename Base::SharedStorage;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
/// Used for the broadcast
struct BroadcastDetail {
/// Number of threads per warp
static int const kWarpSize = 32;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Number of distinct scalar column indices handled by each thread
static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;
/// Number of distinct scalar row indices handled by each thread
static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;
/// Number of threads per threadblock
static int const kThreadCount = kWarpSize * WarpCount::kCount;
/// Number of distinct threads per row of output tile
static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);
/// Number of distinct threads which must be reduced during the final reduction phase within the threadblock.
static int const kThreadRows = kThreadCount / kThreadsPerRow;
    /// Number of accesses each thread makes to cover one row of the output tile (ceil(Shape::kN / kThreadCount))
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
kThreadRows,
Shape::kN
>;
/// Debug printing
CUTLASS_DEVICE
static void print() {
#if 0
printf("BroadcastDetail {\n");
printf(
" kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
"kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
kColumnsPerThread,
kRowsPerThread,
kThreadCount,
kThreadsPerRow,
kThreadRows,
kThreadAccessesPerRow,
StorageShape::kRow,
StorageShape::kColumn,
StorageShape::kCount
);
printf("};\n");
#endif
}
};
  /// Shared storage structure (shadows the base class's shared memory allocation)
struct SharedStorage {
union {
BaseSharedStorage base;
};
CUTLASS_HOST_DEVICE
SharedStorage() { }
};
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Thread index within the threadblock
int thread_idx_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithBroadcast(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.base.reference(), thread_idx),
thread_idx_(thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord()) {
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
if (!output_op.is_source_needed()) {
compute_source_not_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
tensor_iterator);
}
else {
compute_source_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
source_iterator,
tensor_iterator);
}
}
private:
CUTLASS_DEVICE
void load_broadcast_fragment_(
BroadcastFragment & broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
ElementVector const * broadcast_ptr, ///< Broadcast vector
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
broadcast_fragment.clear();
// If no pointer is supplied, set with all zeros and avoid memory accesses
if (!broadcast_ptr) {
return;
}
int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column();
int thread_column_idx = threadblock_offset.column() + thread_initial_column;
broadcast_ptr += thread_initial_column;
NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess> converter;
using AccessType = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>;
using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>;
ComputeFragmentType *frag_ptr = reinterpret_cast<ComputeFragmentType *>(&broadcast_fragment);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) {
AccessType loaded;
loaded.clear();
if (thread_column_idx < problem_size.column()) {
loaded = *reinterpret_cast<AccessType const *>(broadcast_ptr);
}
ComputeFragmentType cvt = converter(loaded);
frag_ptr[j] = cvt;
thread_column_idx += ThreadMap::Delta::kColumn;
broadcast_ptr += ThreadMap::Delta::kColumn;
}
}
template <class Seq>
struct acc2smem_source_not_needed;
template <size_t... Seq>
struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
template <int Advance>
CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
warp_tile_iterator.store(accum_fragment);
if (p < Base::kFragmentsPerIteration - 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
}
}
if (Base::kFragmentsPerIteration > 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
(1 - Base::kFragmentsPerIteration));
}
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {
(pos == (Seq * Base::kFragmentsPerIteration)) &&
(helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
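  // Note on the dispatch idiom above: push() expands the index sequence into an initializer
  // list so that one helper<Seq * kFragmentsPerIteration>() instantiation exists per possible
  // position, and at run time only the entry whose index matches 'pos' executes its helper
  // (the short-circuiting '&&' skips all others). A minimal standalone sketch of the same
  // pattern, using hypothetical names that do not appear elsewhere in this file:
  //
  //   template <size_t... Seq>
  //   CUTLASS_DEVICE static void dispatch(size_t pos, cutlass::index_sequence<Seq...>) {
  //     int dummy[] = {(pos == Seq) && (do_work<Seq>(), 0)...};
  //     CUTLASS_UNUSED(dummy[0]);
  //   }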
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additional tensor operand
) {
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
// CUTLASS_PRAGMA_UNROLL
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) {
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_not_needed<
cutlass::make_index_sequence<OutputTileIterator::kIterations /
Base::kFragmentsPerIteration>>::push(iter,
accum_fragment_iterator,
this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
if (p < Base::kFragmentsPerIteration - 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
}
else if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_source_not_needed_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
if (Base::kFragmentsPerIteration > 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
}
}
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
      int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
      CUTLASS_UNUSED(dummy[0]);
    }
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
    TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additional tensor operand
) {
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator.load(source_fragment);
++source_iterator;
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
source_fragment,
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
typename OutputTileIterator::Fragment const &frag_C,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
OutputAccessType const *frag_C_ptr =
reinterpret_cast<OutputAccessType const *>(&frag_C);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
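    // The broadcast fragment holds only ThreadMap::Iterations::kColumn accesses (one per
    // column iteration); indexing it with (i % ThreadMap::Iterations::kColumn) below reuses
    // the same per-column broadcast values for every row of the output tile.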
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_C_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
public:
/// Stream-K reduce helper
CUTLASS_DEVICE
void reduce(
int reduce_fragment_idx, ///< Reduce fragment index
OutputOp const &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
    OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord())
{
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
// Initialize/load source-fragment data
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
if (output_op.is_source_needed())
{
source_iterator += reduce_fragment_idx;
source_iterator.load(source_fragment);
}
// Load fragment from shared memory
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// Add fragments shared by other k partitions
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
if (!output_op.is_source_needed()) {
apply_output_operator_source_not_needed_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
} else {
apply_output_operator_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
source_fragment,
broadcast_fragment);
}
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_with_broadcast.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_with_broadcast.h",
"repo_id": "include",
"token_count": 22671
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
  typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
BlasMode BlasMode_ = BlasMode::kGemm ///< Tile Iterator for a Symmetric or Hermitian Kernel
>
class PredicatedTileIteratorBlas3 {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static BlasMode const kBlasMode = BlasMode_;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
static_assert( AccessType::kElements == 1, "BLAS3 Epilogue must use AccessType::kElements as 1");
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout):
PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
)
{
}
};
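  // Construction sketch (hypothetical extents, for illustration only): Params precomputes the
  // byte-level strides and increments from a row-major layout of the output matrix.
  //
  //   Layout layout = Layout::packed({rows, columns});   // stride(0) == columns
  //   Params params(layout);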
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorParams params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Fill Mode for a tile on diagonal of a symmetric kernel
cutlass::FillMode fill_mode;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// Internal state counter
int state_[3];
/// Starting address of the matrix
size_t matrix_start_addr;
static_assert((kBlasMode == BlasMode::kSymmetric || kBlasMode == BlasMode::kHermitian),
"Unsupported blas3 mode.");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorBlas3(
PredicatedTileIteratorParams const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset
, cutlass::FillMode fill_mode
):
params_(params), fill_mode(fill_mode)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_row_ = extent.row();
thread_start_row_ = thread_offset.row();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < extent.column());
}
    // Check symmetric/hermitian fill modes: kLower or kUpper for CTAs on the diagonal, kNone for the remaining CTAs
if ((kBlasMode == BlasMode::kSymmetric || kBlasMode == BlasMode::kHermitian) &&
fill_mode == cutlass::FillMode::kInvalid) {
arch::device_breakpoint();
}
// Starting address of the matrix
matrix_start_addr = reinterpret_cast<size_t>(pointer);
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) +
LongIndex(thread_offset.row()) * LongIndex(params_.stride) +
LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment on the diagonal of a symmetric kernel to memory
CUTLASS_DEVICE
void load_symmetric_with_byte_offset(Fragment &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
bool isLowerMode = (fill_mode == cutlass::FillMode::kLower) ? true : false;
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
// Offset of row from beginning of the matrix per thread
size_t row_start_offset = (size_t)memory_pointer - matrix_start_addr;
// Absolute row index
int row_index = int(row_start_offset/params_.stride);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
// Offset of column from beginning of row per thread
size_t col_start_offset = row_start_offset +
(column * ThreadMap::Delta::kColumn / kElementsPerAccess) * sizeof(AccessType);
// Absolute column index
size_t col_index = (col_start_offset%params_.stride)/sizeof(AccessType);
guard = guard && ( (isLowerMode && row_index >= col_index) ||
(!isLowerMode && row_index <= col_index) );
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
            // The imaginary parts of the diagonal elements of a Hermitian matrix are assumed to be zero, so they are zeroed after the load
if (guard && kBlasMode == BlasMode::kHermitian && cutlass::is_complex<Element>::value) {
Element *scalar_ptr = reinterpret_cast<Element *>(frag_ptr);
if (row_index == col_index) {
scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column] =
real(scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column]);
}
}
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
if (fill_mode == cutlass::FillMode::kNone) {
load_with_byte_offset(frag, 0);
}
else {
load_symmetric_with_byte_offset(frag, 0);
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Stores a fragment on the diagonal of a symmetric kernel to memory
CUTLASS_DEVICE
void store_symmetric_with_byte_offset(Fragment const &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
bool isLowerMode = (fill_mode == cutlass::FillMode::kLower) ? true : false;
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
// Offset of row from beginning of the matrix per thread
size_t row_start_offset = (size_t)memory_pointer - matrix_start_addr;
// Absolute row index
int row_index = int(row_start_offset/params_.stride);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
// Offset of column from beginning of row per thread
size_t col_start_offset = row_start_offset +
(column * ThreadMap::Delta::kColumn / kElementsPerAccess) * sizeof(AccessType);
// Absolute column index
size_t col_index = (col_start_offset%params_.stride)/sizeof(AccessType);
guard = guard && ( (isLowerMode && row_index >= col_index) ||
(!isLowerMode && row_index <= col_index) );
            // The imaginary parts of the diagonal elements of a Hermitian matrix are assumed to be zero, so they are zeroed before the store
if (guard && kBlasMode == BlasMode::kHermitian && cutlass::is_complex<Element>::value) {
AccessType *frag_ptr_modify = const_cast<AccessType *>(frag_ptr);
Element *scalar_ptr = reinterpret_cast<Element *>(frag_ptr_modify);
if (row_index == col_index) {
scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column] =
real(scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column]);
}
}
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
if (fill_mode == cutlass::FillMode::kNone) {
store_with_byte_offset(frag, 0);
}
else {
store_symmetric_with_byte_offset(frag, 0);
}
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorBlas3 &operator++() {
++state_[0];
byte_pointer_ += params_.advance_row;
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
byte_pointer_ += params_.advance_group;
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
byte_pointer_ += params_.advance_cluster;
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
byte_pointer_ += params_.advance_tile;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
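// Usage sketch (device-side, assumed template arguments; in practice the epilogue constructs
// and drives this iterator rather than user code):
//
//   using Iterator = PredicatedTileIteratorBlas3<OutputTileThreadMap, float, BlasMode::kSymmetric>;
//   Iterator::Params params(layout);
//   Iterator it(params, pointer, extent, thread_idx, threadblock_offset, fill_mode);
//   Iterator::Fragment frag;
//   it.load(frag);     // honors fill_mode: diagonal CTAs mask out the opposite triangle
//   it.store(frag);    // the same predication applies on the store path
//   ++it;              // advance to the next tile position in the output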
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h",
"repo_id": "include",
"token_count": 8475
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue.
These quantities assume a 'column-major' arrangement of TensorOp instructions, of which
a row-oriented slice is visible per iteration.
*/
#pragma once
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
/// Policy details related to the epilogue
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm:GemmShape)
typename Layout ///< target shared memory layout
>
struct TensorOpPolicy;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape ///< matrix multiply operation shape (concept: gemm::GemmShape)
>
struct TensorOpPolicy<WarpShape, OperatorShape, layout::RowMajor> {
/// Number of operations
using OperatorCount = MatrixShape<
(WarpShape::kM + OperatorShape::kM - 1) / OperatorShape::kM,
(WarpShape::kN + OperatorShape::kN - 1) / OperatorShape::kN
>;
//
// Hard-coded constants regarding Tensor Operations
//
static int const kElementsPerAccess = 2;
static int const kRowsPerIteration = 8;
static bool const kDivisible =
!(WarpShape::kM % OperatorShape::kM) && !(WarpShape::kN % OperatorShape::kN);
//
// Derived quantities
//
// Number of 'externally visible' iterations per actual instruction
static int const kIterationsPerInstruction = OperatorShape::kM / kRowsPerIteration;
// Number of externally visible iterations
static int const kIterations = OperatorCount::kRow * kIterationsPerInstruction;
using TileIterations = MatrixShape<kIterations, 1>;
static int const kAccumulatorRowStride = kElementsPerAccess;
static int const kAccumulatorColumnStride = kElementsPerAccess * OperatorCount::kRow * kIterationsPerInstruction;
};
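// Worked example (assumed shapes, for illustration only): for a 64x64 warp tile and a 16x8
// TensorOp instruction shape, OperatorCount = <4, 8>, kIterationsPerInstruction = 16 / 8 = 2,
// kIterations = 4 * 2 = 8, kAccumulatorRowStride = 2, and
// kAccumulatorColumnStride = 2 * 4 * 2 = 16.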
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major-interleaved
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation (concept: arch::Mma)
int InterleavedK ///< number of interleaved k
>
struct TensorOpPolicy<WarpShape, OperatorShape,
layout::ColumnMajorInterleaved<InterleavedK> > {
/// Number of operations
using OperatorCount = MatrixShape<WarpShape::kM / OperatorShape::kM,
WarpShape::kN / OperatorShape::kN>;
//
// Hard-coded constants regarding Tensor Operations
//
static int const kElementsPerAccess = 2;
static int const kRowsPerIteration = 8;
//
// Derived quantities
//
// Number of 'externally visible' iterations per actual instruction
static int const kIterationsPerInstruction =
OperatorShape::kM / kRowsPerIteration;
// Number of externally visible iterations
static int const kIterations = WarpShape::kN / InterleavedK *
OperatorCount::kRow *
kIterationsPerInstruction;
static int const kElementsPerIteration = InterleavedK / OperatorShape::kN * kElementsPerAccess;
static int const kAccessPerIteration = kElementsPerIteration / kElementsPerAccess;
// Number of externally visible iterations
//static int const kTileIterations = OperatorCount::kRow * kIterationsPerInstruction;
using TileIterations = MatrixShape<1, WarpShape::kN / InterleavedK>;
};
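// Worked example (assumed shapes, for illustration only): for a 64x64 warp tile, a 16x8
// TensorOp instruction shape, and InterleavedK = 32, OperatorCount = <4, 8>,
// kIterationsPerInstruction = 2, kIterations = 64 / 32 * 4 * 2 = 16,
// kElementsPerIteration = 32 / 8 * 2 = 8, kAccessPerIteration = 4, and TileIterations = <1, 2>.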
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/tensor_op_policy.h/0 | {
"file_path": "include/cutlass/epilogue/warp/tensor_op_policy.h",
"repo_id": "include",
"token_count": 1683
} | 29 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/algorithm/clear.hpp"
#include "cute/tensor.hpp"
//////////////////////////////////////////////////////////////////////////////
///////////////////////////////////FP8 Accumulation///////////////////////////
//////////////////////////////////////////////////////////////////////////////
/// Promotes (adds) the results from the tensor core accumulators to the main
/// accumulators once the number of issued MMAs reaches the promotion interval
/// specified by the user; the tensor core accumulators are then zeroed.
//////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
template <
class EngineAccum,
class LayoutAccum>
struct GmmaFP8Accumulation {
using TensorAccum = cute::Tensor<EngineAccum, LayoutAccum>;
static_assert(is_static<LayoutAccum>::value, "Accumulator Layout should be static");
static_assert(is_rmem<TensorAccum>::value , "Accumulator tensor must be rmem resident.");
private:
TensorAccum& accum_;
TensorAccum accum_temp_;
uint32_t accum_promotion_interval_; // defines the max num of executed MMAs after which accum should be promoted.
uint32_t mma_count_per_mainloop_iteration_; // num of MMAs per k_tile of mainloop
uint32_t mma_count_; // current executed MMAs
uint32_t reset_accum_flag_; // accum needs to be zeroed or not.
CUTLASS_DEVICE
void promote_core() {
warpgroup_wait<0>();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(accum_); ++i) {
accum_(i) += accum_temp_(i);
}
}
public:
CUTLASS_DEVICE
GmmaFP8Accumulation(
TensorAccum &accum,
uint32_t accum_promotion_interval,
uint32_t mma_count_per_mainloop_iteration)
: accum_(accum),
accum_promotion_interval_(accum_promotion_interval),
mma_count_per_mainloop_iteration_(mma_count_per_mainloop_iteration),
mma_count_(0),
reset_accum_flag_(0)
{
accum_temp_ = cute::make_fragment_like(accum);
}
CUTLASS_DEVICE
TensorAccum& operator()() {
return accum_temp_;
}
  /// Returns true when the temporary MMA accumulators must be zeroed before the next set of MMAs.
CUTLASS_DEVICE
bool prepare_if_needed() {
return reset_accum_flag_;
}
/// promote (add) the results from the MMA accumulators to main accumulator if needed.
CUTLASS_DEVICE
void promote_if_needed() {
mma_count_ += mma_count_per_mainloop_iteration_;
reset_accum_flag_ = __shfl_sync(0xffffffff, mma_count_ == accum_promotion_interval_, 0);
if (reset_accum_flag_) {
promote_core();
mma_count_ = 0;
}
}
/// promote (add) the residue results from the MMA accumulators to main accumulator if needed.
CUTLASS_DEVICE
void promote_residue_if_needed() {
if (__shfl_sync(0xffffffff, mma_count_ > 0, 0)) {
promote_core();
}
}
};
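// Mainloop usage sketch (hypothetical surrounding code; the SM90 FP8 collective mainloop owns
// the tiled MMA, the pipeline, and the register tensors named below):
//
//   GmmaFP8Accumulation accumulation(accum, AccumPromotionInterval, mmas_per_k_tile);
//   for (; k_tile_count > 0; --k_tile_count) {
//     if (accumulation.prepare_if_needed()) {
//       tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;    // restart the temporary accumulators
//     }
//     cute::gemm(tiled_mma, tCrA, tCrB, accumulation()); // MMAs target accum_temp_
//     accumulation.promote_if_needed();                  // fold into the main accumulators
//   }
//   accumulation.promote_residue_if_needed();            // fold any remaining partial results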
} // namespace cutlass::gemm::collective
| include/cutlass/gemm/collective/fp8_accumulation.hpp/0 | {
"file_path": "include/cutlass/gemm/collective/fp8_accumulation.hpp",
"repo_id": "include",
"token_count": 1530
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined Rank2K kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/rank_2k_universal.h"
#include "cutlass/gemm/kernel/default_rank_2k_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassTensorOp,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm80,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementB_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
    /// Operation performed by the Rank 2k update kernel
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Complex elementwise transformation
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex elementwise transformation
ComplexTransform TransformB = ComplexTransform::kNone,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_ = BlasMode::kSymmetric>
class Rank2K {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static FillMode const kFillModeC = FillModeC;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
static BlasMode const kBlasMode = BlasMode_;
static int const kUpdateRank = 2;
// static asserts for rank 2k update kernel
static_assert(platform::is_same<LayoutA, LayoutB>::value,
"Rank 2K update operator support same layouts for operandA and B");
/// Define the kernel
using Rank2Kkernel = typename kernel::DefaultRank2KUniversal<
ElementA,
LayoutA,
kTransformA,
kAlignmentA,
ElementB,
LayoutB,
kTransformB,
kAlignmentB,
ElementC,
LayoutC,
kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator,
kBlasMode
>::Rank2Kkernel;
using Arguments = typename Rank2Kkernel::Arguments;
private:
/// Kernel parameters object
typename Rank2Kkernel::Params params_;
public:
  /// Constructs the Rank2K operator.
  Rank2K() { }
  /// Determines whether the Rank2K operator can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
Status status = Rank2Kkernel::can_implement(args);
if (FillModeC != FillMode::kLower && FillModeC != FillMode::kUpper) {
return Status::kErrorInvalidProblem;
}
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial && args.batch_count > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
  /// Initializes Rank2K state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial) {
if (args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
}
int gemm_k_size = args.problem_size.k();
// Initialize the Params structure
params_ = typename Rank2Kkernel::Params{
args,
grid_tiled_shape,
gemm_k_size,
static_cast<int *>(workspace)
};
int smem_size = int(sizeof(typename Rank2Kkernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<Rank2Kkernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
params_.update(args, workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(Rank2Kkernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename Rank2Kkernel::SharedStorage));
cutlass::Kernel<Rank2Kkernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
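// Usage sketch (assumed element types and layouts; the exact Arguments constructor is defined
// by kernel::DefaultRank2KUniversal and is elided here):
//
//   using Rank2KOp = cutlass::gemm::device::Rank2K<
//       cutlass::half_t, cutlass::layout::ColumnMajor,   // A
//       cutlass::half_t, cutlass::layout::ColumnMajor,   // B
//       float, cutlass::layout::ColumnMajor,             // C / D
//       cutlass::FillMode::kLower,
//       float>;
//
//   Rank2KOp rank2k_op;
//   Rank2KOp::Arguments args(/* problem size, pointers, strides, epilogue scalars */);
//   size_t workspace_bytes = Rank2KOp::get_workspace_size(args);
//   cutlass::Status status = rank2k_op(args, workspace_ptr);   // initialize() followed by run()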
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major output; exchanges the A and B operands.
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial,
/// Operation performed by Rank2K update kernel
typename Operator_,
/// Complex elementwise transformation
ComplexTransform TransformA,
/// Complex elementwise transformation
ComplexTransform TransformB,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_
>
class Rank2K<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
FillModeC, ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB,
SplitKSerial, Operator_, TransformA, TransformB, BlasMode_> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static FillMode const kFillModeC = FillModeC;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static BlasMode const kBlasMode = BlasMode_;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
static int const kUpdateRank = 2;
/// Define the kernel
using UnderlyingOperator = typename cutlass::gemm::device::Rank2K<
ElementB,
LayoutB,
ElementA,
LayoutA,
ElementC,
layout::RowMajor,
InvertFillMode<FillModeC>::mode,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kAlignmentB,
kAlignmentA,
kSplitKSerial,
Operator,
kTransformA,
kTransformB,
kBlasMode
>;
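  // Design note: the column-major problem is mapped onto the row-major kernel by exchanging
  // the A and B operands and inverting the fill mode; reinterpreting the row-major output as
  // column-major then yields the requested result, since the rank-2k update is symmetric in
  // the roles of A and B (the complex transforms cover conjugation in the Hermitian case).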
/// Argument structure
using Arguments = typename UnderlyingOperator::Arguments;
using Rank2Kkernel = typename UnderlyingOperator::Rank2Kkernel;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the Rank2K.
Rank2K() { }
  /// Helper to construct a transposed equivalent for the underlying Rank2K operator
static Arguments to_underlying_arguments(Arguments const &args) {
return args.transposed_problem();
}
/// Determines whether the Rank2K can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
return UnderlyingOperator::maximum_active_blocks(smem_capacity);
}
/// Initializes Rank2K state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/rank_2k.h/0 | {
"file_path": "include/cutlass/gemm/device/rank_2k.h",
"repo_id": "include",
"token_count": 6106
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel-level batched GEMV supporting column-major and row-major A matrices.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename ElementC_,
typename ElementAccumulator_,
typename EpilogueOutputOp_,
int kElementsPerAccess_ = 1, ///< Number of elements involved in a global access.
int kThreadCount_ = 0, ///< Number of threads in the thread block.
/// It will be calculated automatically if set to 0.
int kThreadsPerRow_ = 0 ///< Number of threads in the k dimension.
/// It will be calculated automatically if set to 0.
>
struct Gemv;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// GEMV for column-major A matrix
template <
typename ElementA_,
typename ElementB_,
typename ElementC_,
typename ElementAccumulator_,
typename EpilogueOutputOp_,
int kElementsPerAccess_,
int kThreadCount_,
int kThreadsPerRow_
>
struct Gemv <
ElementA_,
layout::ColumnMajor,
ElementB_,
ElementC_,
ElementAccumulator_,
EpilogueOutputOp_,
kElementsPerAccess_,
kThreadCount_,
kThreadsPerRow_
>{
public:
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using TensorRefA = TensorRef<ElementA, LayoutA>;
using ElementB = ElementB_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using EpilogueOutputOp = EpilogueOutputOp_;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
// thread block shape (kThreadCount, 1, 1)
static int const kThreadCount = (kThreadCount_ <= 0) ? 32 : kThreadCount_;
static int const kThreadsPerRow = (kThreadsPerRow_ <= 0) ? 1 : kThreadsPerRow_;
static int const kStages = 1;
static int const kAlignmentA = 1;
static int const kAlignmentB = 1;
static int const kAlignmentC = 1;
//
// Structures
//
/// Argument structure
struct Arguments {
MatrixCoord problem_size;
int32_t batch_count;
typename EpilogueOutputOp::Params output_op;
TensorRefA ref_A;
ElementB const *ptr_B;
ElementC const *ptr_C;
ElementC *ptr_D;
int64_t inc_B;
int64_t inc_C;
int64_t inc_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
//
// Methods
//
Arguments(): batch_count(0) { }
Arguments(
MatrixCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const *ptr_B,
void const *ptr_C,
void *ptr_D,
int64_t inc_B,
int64_t inc_C,
int64_t inc_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D
):
problem_size(problem_size),
batch_count(batch_count),
output_op(output_op),
ref_A(ref_A),
ptr_B(static_cast<ElementB const *>(ptr_B)),
ptr_C(static_cast<ElementC const *>(ptr_C)),
ptr_D(static_cast<ElementC *>(ptr_D)),
inc_B(inc_B),
inc_C(inc_C),
inc_D(inc_D),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C),
batch_stride_D(batch_stride_D)
{ }
Arguments(
MatrixCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const *ptr_B,
void const *ptr_C,
void *ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D
):
Arguments(
problem_size,
batch_count,
output_op,
ref_A,
ptr_B,
ptr_C,
ptr_D,
1,
1,
1,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D)
{ }
Arguments(
MatrixCoord problem_size,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const *ptr_B,
void const *ptr_C,
void *ptr_D,
int64_t inc_B,
int64_t inc_C,
int64_t inc_D
):
Arguments(
problem_size,
1,
output_op,
ref_A,
ptr_B,
ptr_C,
ptr_D,
inc_B,
inc_C,
inc_D,
1,
1,
1,
1)
{ }
Status update(Arguments const &args) {
output_op = args.output_op;
ref_A = args.ref_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
return Status::kSuccess;
}
};
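// Example of populating these arguments for a strided batched GEMV
// (illustrative sketch; the pointer names, leading dimension `lda`, and the
// {alpha, beta} epilogue parameters are hypothetical, not part of this header):
//
//   cutlass::MatrixCoord problem(m, k);       // A is m-by-k, column-major
//   Arguments args(
//     problem,
//     batch,                                  // batch_count
//     {alpha, beta},                          // EpilogueOutputOp::Params
//     {ptr_A, lda},                           // ref_A
//     ptr_x, ptr_y, ptr_y,                    // ptr_B, ptr_C, ptr_D
//     1, 1, 1,                                // inc_B, inc_C, inc_D
//     int64_t(lda) * k, k, m, m);             // batch strides for A, B, C, D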
using Params = Arguments;
/// Shared memory storage structure
union SharedStorage {
};
public:
//
// Methods
//
CUTLASS_DEVICE
Gemv() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::MatrixCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMV
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Loop over batch indices
for (int batch_idx = blockIdx.z; batch_idx < params.batch_count; batch_idx += gridDim.z) {
int i = blockIdx.x * kThreadCount + threadIdx.x;
ElementA const *ptr_A = params.ref_A.data() + i;
ElementB const *ptr_B = params.ptr_B;
ptr_A += batch_idx * params.batch_stride_A;
ptr_B += batch_idx * params.batch_stride_B;
ElementAccumulator accum = ElementAccumulator();
// Compute inner product
CUTLASS_PRAGMA_NO_UNROLL
for (int k = 0; k < params.problem_size.column(); ++k) {
// Fetch from A
ElementA a = ElementA();
if (i < params.problem_size.row()) {
a = *ptr_A;
}
ptr_A += params.ref_A.stride(0);
// Fetch from B
ElementB b = *ptr_B;
ptr_B += params.inc_B;
// Math
accum += ElementAccumulator(a) * ElementAccumulator(b);
}
//
// Epilogue phase
//
ElementC const *ptr_C = params.ptr_C + i * params.inc_C + batch_idx * params.batch_stride_C;
ElementC *ptr_D = params.ptr_D + i * params.inc_D + batch_idx * params.batch_stride_D;
EpilogueOutputOp output_op(params.output_op);
typename EpilogueOutputOp::FragmentAccumulator accum_fragment;
typename EpilogueOutputOp::FragmentOutput source_fragment;
typename EpilogueOutputOp::FragmentOutput output_fragment;
accum_fragment[0] = accum;
if (i < params.problem_size.row()) {
if (output_op.is_source_needed()) {
source_fragment[0] = *ptr_C;
output_fragment = output_op(accum_fragment, source_fragment);
}
else {
output_fragment = output_op(accum_fragment);
}
*ptr_D = output_fragment[0];
}
}
}
};
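// Launch-geometry sketch for this specialization (illustrative; the epilogue
// type and the wrapper used to actually launch the kernel are assumptions, not
// part of this file):
//
//   using Epilogue = cutlass::epilogue::thread::LinearCombination<float, 1>;
//   using GemvKernel = cutlass::gemm::kernel::Gemv<
//       float, cutlass::layout::ColumnMajor, float, float, float, Epilogue>;
//
//   // One thread per row of A; blockIdx.z strides over the batch dimension.
//   dim3 block(GemvKernel::kThreadCount, 1, 1);
//   dim3 grid((m + GemvKernel::kThreadCount - 1) / GemvKernel::kThreadCount, 1, batch);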
/////////////////////////////////////////////////////////////////////////////////////////////////
// GEMV for row-major A matrix
template <
typename ElementA_,
typename ElementB_,
typename ElementC_,
typename ElementAccumulator_,
typename EpilogueOutputOp_,
int kElementsPerAccess_,
int kThreadCount_,
int kThreadsPerRow_
>
struct Gemv <
ElementA_,
layout::RowMajor,
ElementB_,
ElementC_,
ElementAccumulator_,
EpilogueOutputOp_,
kElementsPerAccess_,
kThreadCount_,
kThreadsPerRow_
>{
public:
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using TensorRefA = TensorRef<ElementA, LayoutA>;
using ElementB = ElementB_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using EpilogueOutputOp = EpilogueOutputOp_;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static FloatRoundStyle const Round = cutlass::FloatRoundStyle::round_to_nearest;
// number of elements returned by a single global memory access
static int const kElementsPerAccess = kElementsPerAccess_;
using FragmentA = Array<ElementA, kElementsPerAccess>;
using FragmentB = Array<ElementB, kElementsPerAccess>;
using FragmentCompute = Array<ElementAccumulator, kElementsPerAccess>;
// thread block shape (kThreadsPerRow, kThreadCount / kThreadsPerRow, 1)
static int const kThreadCount = (kThreadCount_ <= 0) ? 128 : kThreadCount_;
static int const kThreadsPerRow = (kThreadsPerRow_ <= 0) ?
std::min(static_cast<int>(kThreadCount / (kElementsPerAccess * sizeof(ElementA))), 16)
: kThreadsPerRow_;
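// Worked example of the default above: for ElementA = half_t (2 bytes),
// kElementsPerAccess = 8, and kThreadCount = 128, kThreadCount / (8 * 2) = 8,
// so kThreadsPerRow = min(8, 16) = 8 threads cooperate on each row of A.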
//
// Structures
//
/// Argument structure
struct Arguments {
MatrixCoord problem_size;
int32_t batch_count;
typename EpilogueOutputOp::Params output_op;
TensorRefA ref_A;
ElementB const *ptr_B;
ElementC const *ptr_C;
ElementC *ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
//
// Methods
//
Arguments(): batch_count(0) { }
Arguments(
MatrixCoord problem_size,
int32_t batch_count,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const *ptr_B,
void const *ptr_C,
void *ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D
):
problem_size(problem_size),
batch_count(batch_count),
output_op(output_op),
ref_A(ref_A),
ptr_B(static_cast<ElementB const *>(ptr_B)),
ptr_C(static_cast<ElementC const *>(ptr_C)),
ptr_D(static_cast<ElementC *>(ptr_D)),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C),
batch_stride_D(batch_stride_D)
{ }
Arguments(
MatrixCoord problem_size,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const *ptr_B,
void const *ptr_C,
void *ptr_D
):
Arguments(
problem_size,
1,
output_op,
ref_A,
ptr_B,
ptr_C,
ptr_D,
1,
1,
1,
1)
{ }
Status update(Arguments const &args) {
problem_size = args.problem_size;
batch_count = args.batch_count;
output_op = args.output_op;
ref_A = args.ref_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
batch_stride_D = args.batch_stride_D;
return Status::kSuccess;
}
};
using Params = Arguments;
/// Shared memory storage structure
union SharedStorage {
};
public:
//
// Methods
//
CUTLASS_DEVICE
Gemv() {}
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::MatrixCoord const &problem_size) {
if (problem_size.column() % kElementsPerAccess != 0) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMV
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Loop over batch indices
for (int batch_idx = blockIdx.z; batch_idx < params.batch_count; batch_idx += gridDim.z) {
int idx_col_k = threadIdx.x;
int idx_row_m = blockIdx.x * blockDim.y + threadIdx.y;
if (idx_row_m < params.problem_size.row()) {
// problem_size (row = m, column = k)
// matrix A (batch, m, k)
// vector B (batch, 1, k)
// vector C (batch, m, 1)
// vector D (batch, m, 1)
// move in the batch dimension
ElementA const *ptr_A = params.ref_A.data() + batch_idx * params.batch_stride_A;
ElementB const *ptr_B = params.ptr_B + batch_idx * params.batch_stride_B;
ElementC const *ptr_C = params.ptr_C + batch_idx * params.batch_stride_C;
ElementC *ptr_D = params.ptr_D + batch_idx * params.batch_stride_D;
// move in the k dimension
ptr_A += idx_col_k * kElementsPerAccess;
ptr_B += idx_col_k * kElementsPerAccess;
// move in the m dimension
ptr_A += idx_row_m * params.problem_size.column();
ptr_C += idx_row_m;
ptr_D += idx_row_m;
NumericArrayConverter<ElementAccumulator, ElementA, kElementsPerAccess, Round> srcA_converter;
NumericArrayConverter<ElementAccumulator, ElementB, kElementsPerAccess, Round> srcB_converter;
ElementAccumulator accum = 0.f;
FragmentB fragB;
FragmentA fragA;
int unroll_col_k = 0;
// k-dimension extent of the rolling tile covered by one row of threads per iteration
int const tileA_k = kThreadsPerRow * kElementsPerAccess;
for (; unroll_col_k < params.problem_size.column() / tileA_k * tileA_k; unroll_col_k += tileA_k) {
// fetch from matrix A
arch::global_load<FragmentA,
sizeof(FragmentA),
arch::CacheOperation::LastUse>(fragA, (ptr_A + unroll_col_k), true);
// fetch from vector B
arch::global_load<FragmentB,
sizeof(FragmentB),
arch::CacheOperation::Always>(fragB, (ptr_B + unroll_col_k), true);
FragmentCompute fragB_Compute = srcB_converter(fragB);
FragmentCompute fragA_Compute = srcA_converter(fragA);
// Math
CUTLASS_PRAGMA_UNROLL
for (int e = 0; e < kElementsPerAccess; e++) {
accum += fragA_Compute.at(e) * fragB_Compute.at(e);
}
}
// calculate the rest of K elements
// each thread fetch 1 element each time
for (int k = unroll_col_k + idx_col_k; k < params.problem_size.column(); k += kThreadsPerRow) {
ElementB b = *(ptr_B - idx_col_k * kElementsPerAccess + k);
ElementA a = *(ptr_A - idx_col_k * kElementsPerAccess + k);
accum += ElementAccumulator(a) * ElementAccumulator(b);
}
EpilogueOutputOp output_op(params.output_op);
typename EpilogueOutputOp::FragmentOutput source_fragment;
// prefetch from source matrix C
if (output_op.is_source_needed()) {
source_fragment[0] = *(ptr_C);
}
typename EpilogueOutputOp::FragmentAccumulator accum_fragment;
typename EpilogueOutputOp::FragmentOutput output_fragment;
for (int mask = (kThreadsPerRow >> 1); mask > 0; mask >>= 1) {
accum += __shfl_xor_sync(0xFFFFFFFF, accum, mask, 32);
}
if (idx_col_k == 0) {
accum_fragment[0] = accum;
if (output_op.is_source_needed()) {
output_fragment = output_op(accum_fragment, source_fragment);
}
else {
output_fragment = output_op(accum_fragment);
}
*ptr_D = output_fragment[0];
}
}
}
}
};
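// Launch-geometry sketch for this specialization (illustrative only; `GemvKernel`
// names an instantiation of this struct, and how the kernel is ultimately
// launched lies outside this header):
//
//   // blockDim = (kThreadsPerRow, kThreadCount / kThreadsPerRow, 1): each row of
//   // threads accumulates partial dot products over strided slices of k, then the
//   // __shfl_xor_sync butterfly above combines the kThreadsPerRow partials so that
//   // the lane with idx_col_k == 0 writes the finished row of D.
//   dim3 block(GemvKernel::kThreadsPerRow,
//              GemvKernel::kThreadCount / GemvKernel::kThreadsPerRow, 1);
//   dim3 grid((m + block.y - 1) / block.y, 1, batch);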
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemv.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemv.h",
"repo_id": "include",
"token_count": 7933
} | 32 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/reg_reconfig.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cute/tensor.hpp"
///////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::kernel {
///////////////////////////////////////////////////////////////////////////////
template <
class ProblemShape_,
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileScheduler_
>
class GemmUniversal<
ProblemShape_,
CollectiveMainloop_,
CollectiveEpilogue_,
TileScheduler_,
cute::enable_if_t<cute::is_base_of_v<KernelCpAsyncWarpSpecialized, typename CollectiveMainloop_::DispatchPolicy::Schedule>>>
{
public:
//
// Type Aliases
//
using ProblemShape = ProblemShape_;
static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4,
"ProblemShape{} should be <M,N,K> or <M,N,K,L>");
// Mainloop derived types
using CollectiveMainloop = CollectiveMainloop_;
using TileShape = typename CollectiveMainloop::TileShape;
using TiledMma = typename CollectiveMainloop::TiledMma;
using ArchTag = typename CollectiveMainloop::ArchTag;
using ElementA = typename CollectiveMainloop::ElementA;
using StrideA = typename CollectiveMainloop::StrideA;
using ElementB = typename CollectiveMainloop::ElementB;
using StrideB = typename CollectiveMainloop::StrideB;
using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy;
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator;
using ClusterShape = typename DispatchPolicy::ClusterShape;
using MainloopArguments = typename CollectiveMainloop::Arguments;
using MainloopParams = typename CollectiveMainloop::Params;
static_assert(ArchTag::kMinComputeCapability >= 90);
// Epilogue derived types
using CollectiveEpilogue = CollectiveEpilogue_;
using ElementC = typename CollectiveEpilogue::ElementC;
using StrideC = typename CollectiveEpilogue::StrideC;
using ElementD = typename CollectiveEpilogue::ElementD;
using StrideD = typename CollectiveEpilogue::StrideD;
using EpilogueArguments = typename CollectiveEpilogue::Arguments;
using EpilogueParams = typename CollectiveEpilogue::Params;
static_assert(cute::is_void_v<TileScheduler_> or cute::is_same_v<TileScheduler_, PersistentScheduler>,
"Non-persistent warp-specialized kernel does not support specializing the tile scheduler.");
using TileSchedulerTag = TileScheduler_;
using TileScheduler = typename detail::TileSchedulerSelector<
TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler;
using TileSchedulerArguments = typename TileScheduler::Arguments;
// Kernel level shared memory storage
struct SharedStorage {
union TensorStorage {
using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage;
using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage;
MainloopTensorStorage mainloop;
EpilogueTensorStorage epilogue;
} tensors;
struct PipelineStorage : cute::aligned_struct<16> {
using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage;
using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage;
alignas(16) MainloopPipelineStorage mainloop;
alignas(16) EpiLoadPipelineStorage epi_load;
} pipelines;
};
static constexpr int SharedStorageSize = sizeof(SharedStorage);
using GmemTiledCopyA = typename CollectiveMainloop::GmemTiledCopyA;
using GmemTiledCopyB = typename CollectiveMainloop::GmemTiledCopyB;
static_assert(cute::size(GmemTiledCopyA{}) == cute::size(GmemTiledCopyB{}), "Number of threads in A/B tiled copies must be the same.");
static constexpr uint32_t NumLoadWarpGroups = cute::size(GmemTiledCopyA{}) / NumThreadsPerWarpGroup;
static constexpr uint32_t NumMmaWarpGroups = cute::size(TiledMma{}) / NumThreadsPerWarpGroup;
static constexpr uint32_t NumWarpGroups = NumLoadWarpGroups + NumMmaWarpGroups;
static_assert(NumWarpGroups == 2 || NumWarpGroups == 3, "Number of warp groups must be 2 or 3 for good performance.");
static constexpr uint32_t MaxThreadsPerBlock = NumWarpGroups * NumThreadsPerWarpGroup;
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
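// Worked example of the counts above (assuming, hypothetically, tiled copies that
// span one warp group and a TiledMma that spans two): NumLoadWarpGroups = 128 / 128 = 1,
// NumMmaWarpGroups = 256 / 128 = 2, NumWarpGroups = 3, and the kernel launches
// MaxThreadsPerBlock = 3 * 128 = 384 threads per CTA.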
// Device side arguments
struct Arguments {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopArguments mainloop{};
EpilogueArguments epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerArguments scheduler{};
};
// Kernel entry point API
struct Params {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopParams mainloop{};
EpilogueParams epilogue{};
};
//
// Methods
//
// Convert to underlying arguments. In this case, a simple copy for the aliased type.
static
Params
to_underlying_arguments(Arguments const& args, void* workspace) {
(void) workspace;
auto problem_shape = args.problem_shape;
if constexpr (detail::IF_SWAP_AB<CollectiveMainloop>::value) {
// swap M/N
get<0>(problem_shape) = get<1>(args.problem_shape);
get<1>(problem_shape) = get<0>(args.problem_shape);
}
return {
args.mode,
problem_shape,
CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace),
CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace)
};
}
CUTLASS_HOST_DEVICE static
bool
can_implement(Arguments const& args) {
bool implementable = (args.mode == GemmUniversalMode::kGemm) or
(args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4);
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n");
return implementable;
}
implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop);
implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue);
implementable &= TileScheduler::can_implement(args.scheduler);
return implementable;
}
static
size_t
get_workspace_size(Arguments const& args) {
return 0;
}
static
cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
return Status::kSuccess;
}
// Computes the kernel launch grid shape based on runtime parameters
static dim3
get_grid_shape(Params const& params) {
auto cluster_shape = Shape<_1,_1,_1>{};
auto tile_shape = TileShape{};
auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{});
return TileScheduler::get_tiled_cta_shape_mnl(
problem_shape_MNKL, tile_shape, cluster_shape);
}
static dim3
get_block_shape() {
return dim3(MaxThreadsPerBlock, 1, 1);
}
CUTLASS_DEVICE
void
operator()(Params const& params, char* smem_buf) {
using namespace cute;
using X = Underscore;
// Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a.
#if ! defined(__CUDA_ARCH_FEAT_SM90_ALL)
printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n");
#else
enum class WarpGroupRole {
Producer = 0,
Consumer = 1,
};
// Kernel level shared memory storage
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf);
int thread_idx = int(threadIdx.x);
int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup;
int warp_group_idx = canonical_warp_group_idx();
CUTLASS_ASSERT(warp_group_idx < NumWarpGroups);
WarpGroupRole warp_group_role = warp_group_idx < NumLoadWarpGroups ? WarpGroupRole::Producer : WarpGroupRole::Consumer;
// Mainloop Load pipeline
using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline;
typename MainloopPipeline::Params mainloop_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
mainloop_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup;
mainloop_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup;
MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params);
// Epilogue Load pipeline
using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline;
typename EpiLoadPipeline::Params epi_load_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer;
}
epi_load_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup;
epi_load_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup;
EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params);
// Epilogue Store pipeline
using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline;
typename EpiStorePipeline::Params epi_store_pipeline_params;
epi_store_pipeline_params.always_wait = true;
EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params);
// Initialize starting pipeline states for the collectives
// Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding)
typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state;
typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state;
// For the DMA Load (producer) we start with an opposite phase
// i.e., we skip all waits since we know that the buffer is indeed empty
PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>();
PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>();
PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>();
// Preconditions
static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
// Separate out problem shape for convenience
// Optionally append 1s until the problem shape is rank-4, in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{});
auto M = get<0>(problem_shape_MNKL);
auto N = get<1>(problem_shape_MNKL);
auto K = get<2>(problem_shape_MNKL);
auto L = get<3>(problem_shape_MNKL);
// Represent the full tensors
Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); //(m,k,l)
Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); //(n,k,l)
// Get the appropriate blocks for this thread block -- potential for thread block locality
auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K)
TiledMma tiled_mma;
// Make tiled views, defer the slice
Tensor gA_mkl = local_tile(mA_mkl, blk_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l)
Tensor gB_nkl = local_tile(mB_nkl, blk_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l)
// Compute m_coord, n_coord, and l_coord with their post-tiled shapes
auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mkl));
auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nkl));
auto l_coord = idx2crd(int(blockIdx.z), shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
// Slice with m_coord and n_coord
Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k)
// Get pipeline iterators and increments from tensor shapes
auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA));
auto k_tile_count = size<2>(gA);
auto c_tile_count = CollectiveEpilogue::get_load_pipe_increment(blk_shape);
auto d_tile_count = CollectiveEpilogue::get_store_pipe_increment(blk_shape);
// Wait for all threads in the thread block
__syncthreads();
// In a warp specialized kernel, collectives expose data movement and compute operations separately
CollectiveMainloop collective_mainloop;
CollectiveEpilogue collective_epilogue{params.epilogue, shared_storage.tensors.epilogue};
if (warp_group_role == WarpGroupRole::Producer) {
// Compute tile residues for predication
auto m_max_coord = M - size<0>(gA) * get<0>(blk_coord); // M - BLK_M * m_coord
auto n_max_coord = N - size<0>(gB) * get<1>(blk_coord); // N - BLK_N * n_coord
auto k_residue = K - size<1>(gA) * size<2>(gA); // K - BLK_K * k_coord_max
auto residue_mnk = make_tuple(m_max_coord, n_max_coord, k_residue);
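// Residue example: with M = 1000, BLK_M = 128, and m_coord = 7 (the last tile),
// m_max_coord = 1000 - 128 * 7 = 104, so only 104 of that tile's 128 rows are
// valid and loads beyond them are predicated off.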
collective_mainloop.load(
mainloop_pipeline,
mainloop_pipe_producer_state,
gA,
gB,
k_tile_iter, k_tile_count,
residue_mnk,
thread_idx,
shared_storage.tensors.mainloop
);
// Update starting mainloop pipeline state for the pipeline drain
mainloop_pipe_producer_state.advance(k_tile_count);
// Make sure mainloop consumer has been waited upon before issuing epilogue load
collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state);
if (collective_epilogue.is_producer_load_needed()) {
epi_load_pipe_producer_state =
collective_epilogue.load(
epi_load_pipeline,
epi_load_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
tiled_mma,
thread_idx,
shared_storage.tensors.epilogue
);
collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state);
}
}
else if (warp_group_role == WarpGroupRole::Consumer) {
Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N)
collective_mainloop.mma(
mainloop_pipeline,
mainloop_pipe_consumer_state,
accumulators,
k_tile_count,
warp_group_thread_idx,
shared_storage.tensors.mainloop,
params.mainloop
);
// Make sure the math instructions are done and free buffers before entering the epilogue
collective_mainloop.mma_tail(
mainloop_pipeline,
mainloop_pipe_consumer_state,
k_tile_count
);
// Epilogue and write to gD
collective_epilogue.store(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
accumulators,
tiled_mma,
warp_group_thread_idx,
shared_storage.tensors.epilogue
);
}
#endif
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel
| include/cutlass/gemm/kernel/sm90_gemm_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm90_gemm_warpspecialized.hpp",
"repo_id": "include",
"token_count": 6828
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/functional.h"
#include "cutlass/reduction/thread/reduce.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Structure to compute the matrix product for HFMA
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Type of GEMM inner vs outer product
bool
>
struct Mma_HFMA2;
/////////////////////////////
// Specialization for NNN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::ColumnMajor,
layout::ColumnMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 2x1x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
Array<half_t, 2> tmp { ptr_D[n*Shape::kM/2 + m] };
mma(
tmp,
ptr_A[k*Shape::kM/2 + m],
ptr_B[n*Shape::kK + k],
tmp);
ptr_D[n*Shape::kM/2 + m] = tmp;
}
}
}
}
};
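// Indexing example for the specialization above: with Shape = GemmShape<4, 2, 2>,
// D is viewed as Array<half_t, 2> (half2) chunks, so ptr_D[n * Shape::kM / 2 + m]
// holds rows {2m, 2m+1} of column n, and each arch::Mma<GemmShape<2,1,1>, ...>
// call fuses the two row updates of that chunk into a single HFMA2.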
/////////////////////////////
// Specialization for NNT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2<
Shape_,
layout::ColumnMajor,
layout::ColumnMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp { ptr_D[m*Shape::kN/2 + n] };
Array<half_t, 2> tmp_B;
tmp_B[0] = ptr_B->at(2*n*Shape::kK + k);
tmp_B[1] = ptr_B->at((2*n+1)*Shape::kK + k);
mma(
tmp,
ptr_A[k*Shape::kM + m],
tmp_B,
tmp);
ptr_D[m*Shape::kN/2 + n] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for NTN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::ColumnMajor,
layout::RowMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the GEMM M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / Mma::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM / Mma::Shape::kM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN / Mma::Shape::kN; ++n) {
Array<half_t, 2> tmp { ptr_D[m + n * Shape::kM/2] };
mma(
tmp,
ptr_A[m + k * Shape::kM/2],
ptr_B[k * Shape::kN + n],
tmp);
ptr_D[m + n * Shape::kM/2] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for NTT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2<
Shape_,
layout::ColumnMajor,
layout::RowMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp { ptr_D[m*Shape::kN/2 + n] };
mma(
tmp,
ptr_A[k*Shape::kM + m],
ptr_B[k*Shape::kN/2 + n],
tmp);
ptr_D[m*Shape::kN/2 + n] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for TNN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::RowMajor,
layout::ColumnMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 2x1x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
Array<half_t, 2> tmp { ptr_D[n*Shape::kM/2 + m] };
Array<half_t, 2> tmp_A;
tmp_A[0] = ptr_A->at(2*m*Shape::kK + k);
tmp_A[1] = ptr_A->at((2*m+1)*Shape::kK + k);
mma(
tmp,
tmp_A,
ptr_B[n*Shape::kK + k],
tmp);
ptr_D[n*Shape::kM/2 + m] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for TNT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::RowMajor,
layout::ColumnMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp { ptr_D[m*Shape::kN/2 + n] };
Array<half_t, 2> tmp_B;
tmp_B[0] = ptr_B->at(2*n*Shape::kK + k);
tmp_B[1] = ptr_B->at((2*n+1)*Shape::kK + k);
mma(
tmp,
ptr_A[m*Shape::kK + k],
tmp_B,
tmp);
ptr_D[m*Shape::kN/2 + n] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for TTN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::RowMajor,
layout::RowMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 2x1x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
Array<half_t, 2> tmp { ptr_D[n*Shape::kM/2 + m] };
Array<half_t, 2> tmp_A;
tmp_A[0] = ptr_A->at(2*m*Shape::kK + k);
tmp_A[1] = ptr_A->at((2*m+1)*Shape::kK + k);
mma(
tmp,
tmp_A,
ptr_B[k*Shape::kN + n],
tmp);
ptr_D[n*Shape::kM/2 + m] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for TTT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2<
Shape_,
layout::RowMajor,
layout::RowMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp { ptr_D[m*Shape::kN/2 + n] };
mma(
tmp,
ptr_A[m*Shape::kK + k],
ptr_B[k*Shape::kN/2 + n],
tmp);
ptr_D[m*Shape::kN/2 + n] = tmp;
}
}
}
}
};
/////////////////////////////////////////////////////////////////////
// Specialization for TNT + Inner Product or 1x1x2K + LayoutC = T //
/////////////////////////////////////////////////////////////////////
template <typename Shape_, typename LayoutA, typename LayoutB>
struct Mma_HFMA2<
Shape_,
LayoutA,
LayoutB,
layout::RowMajor,
false
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kK % 2),
"Mma_HFMA2 requires the K dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x1x2 HFMA2 sequence for bulk of computation
using GemmShape = gemm::GemmShape<1,1,2>;
Array<half_t, 1> *ptr_D = reinterpret_cast<Array<half_t, 1> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
// Inner product is calculated using MACs, followed by final reduction
multiply_add<Array<half_t, 2>> mac;
cutlass::reduction::thread::Reduce< plus<half_t>, Array<half_t, 2> > reduce;
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / GemmShape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / GemmShape::kM; m++){
Array<half_t, 2> tmp_C;
tmp_C.clear();
Array<half_t, 1> *ptr_tmp_C = reinterpret_cast<Array<half_t, 1> *>(&tmp_C);
ptr_tmp_C[0] = ptr_D[n*Shape::kM + m];
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / GemmShape::kK; k++){
tmp_C = mac(ptr_A[m*Shape::kK/2 + k], ptr_B[n*Shape::kK/2 + k], tmp_C);
}
Array<half_t, 1> res;
Array<half_t, 1> *ptr_res = &res;
res = reduce(tmp_C);
ptr_D[m*Shape::kN + n] = ptr_res[0];
}
}
}
};
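// Inner-product example for the specialization above: with Shape::kK = 8, each
// (m, n) output performs 4 half2 multiply-adds (one per pair of k elements) into
// tmp_C, and a single Reduce<plus<half_t>, Array<half_t, 2>> then collapses the
// two lanes into the final half_t written to D.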
/////////////////////////////////////////////////////////////////////
// Specialization for TNN + Inner Product or 1x1x2K + LayoutC = N //
/////////////////////////////////////////////////////////////////////
template <typename Shape_, typename LayoutA, typename LayoutB>
struct Mma_HFMA2<
Shape_,
LayoutA,
LayoutB,
layout::ColumnMajor,
false
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kK % 2),
"Mma_HFMA2 requires the K dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x1x2 HFMA2 sequence for bulk of computation
using GemmShape = gemm::GemmShape<1,1,2>;
Array<half_t, 1> *ptr_D = reinterpret_cast<Array<half_t, 1> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
// Inner product is calculated using MACs, followed by final reduction
multiply_add<Array<half_t, 2>> mac;
cutlass::reduction::thread::Reduce< plus<half_t>, Array<half_t, 2> > reduce;
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / GemmShape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / GemmShape::kM; m++){
Array<half_t, 2> tmp_C;
tmp_C.clear();
Array<half_t, 1> *ptr_tmp_C = reinterpret_cast<Array<half_t, 1> *>(&tmp_C);
ptr_tmp_C[0] = ptr_D[n*Shape::kM + m];
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / GemmShape::kK; k++){
tmp_C = mac(ptr_A[m*Shape::kK/2 + k], ptr_B[n*Shape::kK/2 + k], tmp_C);
}
Array<half_t, 1> res;
Array<half_t, 1> *ptr_res = &res;
res = reduce(tmp_C);
ptr_D[n*Shape::kM + m] = ptr_res[0];
}
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_, typename LayoutA, typename LayoutB, typename LayoutC
>
struct Mma<
Shape_,
half_t,
LayoutA,
half_t,
LayoutB,
half_t,
LayoutC,
arch::OpMultiplyAdd
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = half_t;
/// Data type of operand B
using ElementB = half_t;
/// Element type of operand C
using ElementC = half_t;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
static bool const a_row_major = platform::is_same< LayoutA, layout::RowMajor>::value;
static bool const b_column_major = platform::is_same< LayoutB, layout::ColumnMajor>::value;
static bool const c_row_major = platform::is_same< LayoutC, layout::RowMajor>::value;
static bool const c_column_major = platform::is_same< LayoutC, layout::ColumnMajor>::value;
static bool const m_mod2 = !(Shape::kM % 2);
static bool const n_mod2 = !(Shape::kN % 2);
static bool const k_mod2 = !(Shape::kK % 2);
// HFMA-based MMA optimizations come in two flavors:
// 1. Inner product
// 2. Outer product
// The outer-product path is selected from LayoutC plus a divisibility check on M or N;
// the inner-product path from LayoutA/LayoutB (or a 1x1x2K shape) plus K divisible by 2.
// If neither applies, the generic MMA is used.
static bool const use_outer_prod = (c_column_major && m_mod2) || (c_row_major && n_mod2);
static bool const use_inner_prod = (a_row_major && b_column_major && k_mod2) || (Shape::kM==1 && Shape::kN==1 && k_mod2);
static bool const use_optimized = (use_outer_prod || use_inner_prod);
using ArchMmaOperator = typename platform::conditional< use_optimized,
detail::Mma_HFMA2<Shape, LayoutA, LayoutB, LayoutC, use_outer_prod>,
MmaGeneric <Shape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator>
>::type;
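// Dispatch example: for Shape = GemmShape<8, 8, 4> with LayoutA = RowMajor,
// LayoutB = ColumnMajor, and LayoutC = ColumnMajor, c_column_major && m_mod2 holds,
// so use_outer_prod is true and ArchMmaOperator resolves to the Mma_HFMA2
// outer-product path; for, say, Shape = GemmShape<3, 5, 7>, none of the predicates
// hold and the MmaGeneric fallback is used instead.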
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
ArchMmaOperator mma;
mma(D, A, B, C);
}
};
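// Usage sketch for this thread-level operator (illustrative; the shape and layouts
// below are arbitrary choices satisfying the divisibility requirements):
//
//   using ThreadMma = cutlass::gemm::thread::Mma<
//       cutlass::gemm::GemmShape<8, 8, 4>,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   ThreadMma mma;
//   ThreadMma::FragmentA frag_A;   // 8x4 half_t elements held in registers
//   ThreadMma::FragmentB frag_B;   // 4x8 half_t elements
//   ThreadMma::FragmentC accum;    // 8x8 half_t accumulators
//   frag_A.clear(); frag_B.clear(); accum.clear();
//   mma(accum, frag_A, frag_B, accum);   // accum = frag_A * frag_B + accum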
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Determines whether to enable thread::Gemm<> specializations compatible with SM50
template <
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB>
struct EnableMma_Crow_SM60 {
static bool const kIsConventionalLayout =
(platform::is_same<LayoutA, layout::RowMajor>::value ||
platform::is_same<LayoutA, layout::ColumnMajor>::value) &&
(platform::is_same<LayoutB, layout::RowMajor>::value ||
platform::is_same<LayoutB, layout::ColumnMajor>::value);
static bool const value = kIsConventionalLayout;
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes matrix product when C is row-major
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
typename LayoutA_,
typename LayoutB_
>
struct Mma<
Shape_,
half_t,
LayoutA_,
half_t,
LayoutB_,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd,
typename platform::enable_if<detail::EnableMma_Crow_SM60<
LayoutA_,
LayoutB_
>::value>::type>{
using Shape = Shape_;
using ElementA = half_t;
using LayoutA = LayoutA_;
using ElementB = half_t;
using LayoutB = LayoutB_;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using Operator = arch::OpMultiplyAdd;
using TransposeMma = Mma<
GemmShapeTranspose<Shape>,
half_t,
typename layout::LayoutTranspose<LayoutB>::type,
half_t,
typename layout::LayoutTranspose<LayoutA>::type,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd,
bool>;
using FragmentA = Array<ElementA, Shape::kMK>;
using FragmentB = Array<ElementB, Shape::kKN>;
using FragmentC = Array<ElementC, Shape::kMN>;
using ArchMmaOperator = typename TransposeMma::ArchMmaOperator;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TransposeMma mma;
mma(D, B, A, C);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/thread/mma_sm60.h/0 | {
"file_path": "include/cutlass/gemm/thread/mma_sm60.h",
"repo_id": "include",
"token_count": 12422
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped GEMM kernel staged through cp.async.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Data type of accumulator matrix
typename ElementC_,
/// Layout of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages
int Stages,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Used for partial specialization
typename Enable = bool>
class MmaMultistage :
public MmaBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
// Optional staged-accumulation (e.g., tf32x3 kernels) for improved numerical
// accuracy, where each mainloop iteration first accumulates into a temporary
// set of freshly-cleared accumulators, which are subsequently added to the
// final accumulator set.
static bool const kStagedAccumulation = arch::detail::UseStagedAccumulation<Operator>::value;
};
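// Worked example of the quantities above (values are illustrative only):
// if AsyncCopyIterationsPerStageA == 8 and Base::kWarpGemmIterations == 3,
//
//   kAccessesPerGroupA = (8 + 3 - 1) / 3 = 3
//
// so the 8 cp.async issues for one stage of A are spread across the 3
// warp-level MMA iterations in groups of at most 3, overlapping global
// fetches with math.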
private:
// Structure encapsulating pipeline state carried from one iteration to the next
struct PipeState {
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
/// Temporary accumulator to facilitate staged-accumulation
FragmentC tmp_accum_;
/// Pair of A fragments used to overlap shared memory loads and math instructions
WarpLoadedFragmentA warp_loaded_frag_A_[2];
WarpTransformedFragmentA warp_transformed_frag_A_[2];
/// Pair of B fragments used to overlap shared memory loads and math instructions
WarpLoadedFragmentB warp_loaded_frag_B_[2];
WarpTransformedFragmentB warp_transformed_frag_B_[2];
};
private:
//
// Data members
//
/// Warp-level MMA operator
Operator warp_mma_;
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
/// Shared memory write stage index
int smem_write_stage_idx_;
/// Shared memory read stage index
int smem_read_stage_idx_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx),
smem_write_stage_idx_(0),
smem_read_stage_idx_(0)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
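// Worked example of the warp_idx decomposition above (values are
// illustrative only). With WarpCount = (kM, kN, kK) = (2, 2, 2) and
// warp_idx = 5:
//
//   warp_idx_mn = 5 % (2 * 2) = 1
//   warp_idx_k  = 5 / (2 * 2) = 1
//   warp_idx_m  = 1 % 2 = 1
//   warp_idx_n  = 1 / 2 = 0
//
// so warp 5 owns the warp-level tile at (m, n, k) = (1, 0, 1).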
/// Advance shared memory read-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_read_stage()
{
++smem_read_stage_idx_;
if (smem_read_stage_idx_ == Base::kStages) {
// Wrap back around to the 'start' of the circular buffer in shared memory
this->warp_tile_iterator_A_.add_tile_offset({0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset({-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
smem_read_stage_idx_ = 0;
}
}
/// Advance global memory read-iterators and shared memory write-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_write_stage(
IteratorA &iterator_A,
IteratorB &iterator_B)
{
// Advance global iterators
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
// Advance shared iterators
smem_iterator_A_.add_tile_offset({0, 1});
smem_iterator_B_.add_tile_offset({1, 0});
// Increment shared memory write stage index
++smem_write_stage_idx_;
if (smem_write_stage_idx_ == Base::kStages) {
// Wrap back around to the 'start' of the circular buffer in shared memory
smem_iterator_A_.add_tile_offset({0, -Base::kStages});
smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx_ = 0;
}
}
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B,
int group_start_A = 0, int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
}
++iterator_A;
}
++this->smem_iterator_A_;
}
}
iterator_B.set_iteration_index(group_start_B *
IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
}
++iterator_B;
}
++this->smem_iterator_B_;
}
}
}
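// Worked example of the kSrcBytes computation above (parameter values are
// illustrative only). For half_t operands (16 bits per element) with
// ThreadMap::kElementsPerAccess == 8 and kAccessesPerVector == 1:
//
//   kSrcBytes = 16 * 8 / 1 / 8 = 16 bytes
//
// which selects the 16-byte flavor of cp.async; narrower vectorizations fall
// back to 8- or 4-byte copies.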
/// GEMM prologue. Bootstrap the global->shared memory pipeline by fetching
/// the global fragments needed by the first kStages-1 threadblock mainloop iterations
CUTLASS_DEVICE
void prologue(
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory
int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining
{
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) {
// Disable global fetching if done with global fetch iterations
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
int src_bytes = (iterator_A.valid() ? kSrcBytes : 0);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
// Move to the next write stage
advance_smem_write_stage(iterator_A, iterator_B);
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Optionally clear the remaining stages of SMEM. This is a functional requirement for
// some kernels so that all accumulator elements outside the GEMM footprint are zero.
if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) {
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_);
typename IteratorA::AccessType zero_A;
zero_A.clear();
last_smem_iterator_A.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
last_smem_iterator_A.get());
*dst_ptr = zero_A;
++last_smem_iterator_A;
}
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB last_smem_iterator_B(this->smem_iterator_B_);
typename IteratorB::AccessType zero_B;
zero_B.clear();
last_smem_iterator_B.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
last_smem_iterator_B.get());
*dst_ptr = zero_B;
++last_smem_iterator_B;
}
}
}
/// Wait until we have at least one completed global fetch stage
CUTLASS_DEVICE
void gmem_wait()
{
// Wait until we have at least one committed global fetch stage. (#uncommitted = Base::kStages - 1 - #committed)
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
}
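// Worked example of the wait condition above (kStages is an example value).
// cp_async_wait<N> returns once at most N committed cp.async groups remain
// in flight. With kStages == 4, the prologue commits kStages - 1 == 3 groups,
// and cp_async_wait<kStages - 2>() == cp_async_wait<2>() returns once no more
// than 2 remain, i.e. at least one full stage has landed in shared memory and
// is safe to read after __syncthreads().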
/// Perform a threadblock mainloop iteration of matrix multiply-accumulate
CUTLASS_DEVICE
void mac_loop_iter(
PipeState &pipe_state, ///< [in|out] loop-carried pipeline state
FragmentC &accum, ///< [in|out] destination accumulator tile
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory
int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining
{
// Unroll the warp-level MMA tiles of a threadblock's mainloop iteration
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load the next warp-tile's A fragment from shared memory
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(pipe_state.warp_loaded_frag_A_[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
// Load the next warp-tile's B fragment from shared memory
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.load(pipe_state.warp_loaded_frag_B_[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_B_;
// Except for the first warp-tile, all warp-tiles convert their incoming shared memory fragments as necessary
if (warp_mma_k > 0) {
warp_mma_.transform(
pipe_state.warp_transformed_frag_A_[warp_mma_k % 2],
pipe_state.warp_transformed_frag_B_[warp_mma_k % 2],
pipe_state.warp_loaded_frag_A_[warp_mma_k % 2],
pipe_state.warp_loaded_frag_B_[warp_mma_k % 2]);
}
// Execute the current warp-tile of MMA operations
if (Detail::kStagedAccumulation) {
warp_mma_(
pipe_state.tmp_accum_,
pipe_state.warp_transformed_frag_A_[warp_mma_k % 2],
pipe_state.warp_transformed_frag_B_[warp_mma_k % 2],
pipe_state.tmp_accum_
);
if (warp_mma_k == 0) {
plus<FragmentC> plus_accum;
accum = plus_accum(accum, pipe_state.tmp_accum_);
pipe_state.tmp_accum_.clear();
}
} else {
warp_mma_(
accum,
pipe_state.warp_transformed_frag_A_[warp_mma_k % 2],
pipe_state.warp_transformed_frag_B_[warp_mma_k % 2],
accum
);
}
// Except for the last warp-tile, all warp-tiles issue their share of
// global->shared fragment copies
if (warp_mma_k < Base::kWarpGemmIterations - 1) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(
iterator_A,
iterator_B,
group_start_iteration_A,
group_start_iteration_B);
}
// The second-to-last warp-tile also:
// - performs the last warp-tile's share of global->shared fragment copies
// - moves to the next global fetch stage
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Performs the last warp-tile's share of global->shared fragment copies
int group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA;
int group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(
iterator_A,
iterator_B,
group_start_iteration_A,
group_start_iteration_B);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Move to the next global fetch stage
advance_smem_write_stage(iterator_A, iterator_B);
advance_smem_read_stage();
// Disable global fetching when done with global fetch iterations
--gemm_k_iterations;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
}
// The last warp-tile also converts the shared memory fragments used by
// the first warp-tile of the next iteration, if necessary (so we can
// immediately start issuing MMA instructions at the top of the loop)
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
warp_mma_.transform(
pipe_state.warp_transformed_frag_A_[(warp_mma_k + 1) % 2],
pipe_state.warp_transformed_frag_B_[(warp_mma_k + 1) % 2],
pipe_state.warp_loaded_frag_A_[(warp_mma_k + 1) % 2],
pipe_state.warp_loaded_frag_B_[(warp_mma_k + 1) % 2]);
}
}
}
/// Perform the specified number of threadblock mainloop iterations of matrix
/// multiply-accumulate. Assumes prologue has been initiated.
CUTLASS_DEVICE
void gemm_iters(
int gemm_k_iterations, ///< number of threadblock mainloop iterations
FragmentC &accum, ///< [in|out] accumulator tile
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B) ///< [in|out] iterator over B operand in global memory
{
PipeState pipe_state;
// Disable global fetching if done with global fetch iterations
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
// Load first warp-tile's A fragment from shared memory
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(pipe_state.warp_loaded_frag_A_[0]);
++this->warp_tile_iterator_A_;
// Load first warp-tile's B fragment from shared memory
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_B_.load(pipe_state.warp_loaded_frag_B_[0]);
++this->warp_tile_iterator_B_;
// Transform, if necessary, the first warp-tile's shared memory fragments
warp_mma_.transform(
pipe_state.warp_transformed_frag_A_[0],
pipe_state.warp_transformed_frag_B_[0],
pipe_state.warp_loaded_frag_A_[0],
pipe_state.warp_loaded_frag_B_[0]);
if (Detail::kStagedAccumulation) {
pipe_state.tmp_accum_.clear();
}
// Mainloop
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
mac_loop_iter(
pipe_state,
accum,
iterator_A,
iterator_B,
gemm_k_iterations);
}
if (Detail::kStagedAccumulation) {
plus<FragmentC> plus_accum;
accum = plus_accum(accum, pipe_state.tmp_accum_);
}
// Commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
/// Prepares the class for another prologue.
CUTLASS_DEVICE
void wind_down()
{
// Catch-up the smem-read iterator to the smem-write iterator (so this class can be reused for another tile's prologue)
// First, increment remaining warp tiles to get to the next full stage. (Ideally we would
// just decrement one tile, but not all iterators implement --() decrement.)
#pragma unroll
for (int warp_mma_k = 1; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k)
{
this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k);
this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
}
smem_read_stage_idx_++;
// Then wrap back two full stages (one for the tile advancing we just did, and one to catch the write iterators)
static const int kStageIters = Policy::kPartitionsK * Base::kWarpGemmIterations;
if (smem_read_stage_idx_ > 1)
{
this->warp_tile_iterator_A_.add_tile_offset({0, (-2 * kStageIters)});
this->warp_tile_iterator_B_.add_tile_offset({(-2 * kStageIters), 0});
}
else
{
this->warp_tile_iterator_A_.add_tile_offset({0, ((Base::kStages - 2) * kStageIters)});
this->warp_tile_iterator_B_.add_tile_offset({((Base::kStages - 2) * kStageIters), 0});
}
smem_read_stage_idx_ = smem_write_stage_idx_;
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< initial value of accumulator
FragmentC const &src_accum) {
// Prologue (start fetching iterations of global fragments into shared memory)
prologue(iterator_A, iterator_B, gemm_k_iterations);
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Initialize destination accumulators with source accumulators
accum = src_accum;
// Perform the MAC-iterations
gemm_iters(gemm_k_iterations, accum, iterator_A, iterator_B);
}
};
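// Illustrative sketch of how a threadblock-scoped kernel typically drives this
// class (not part of the library; iterator construction is omitted and the
// names below are assumptions for the example):
//
//   using Mma = cutlass::gemm::threadblock::MmaMultistage<...>;
//
//   __shared__ typename Mma::SharedStorage shared_storage;
//
//   Mma mma(shared_storage, thread_idx, warp_idx, lane_idx);
//
//   typename Mma::FragmentC accum;
//   accum.clear();
//
//   int gemm_k_iterations = (problem_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
//
//   // iterator_A / iterator_B are predicated global-memory tile iterators
//   mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);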
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_multistage.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_multistage.h",
"repo_id": "include",
"token_count": 10949
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
enum class TensorFloat32Op {
k3xTF32,
k4xTF32
};
template <
/// Floating-point rounding style
FloatRoundStyle RoundBigA_,
/// Floating-point rounding style
FloatRoundStyle RoundSmallA_,
/// Floating-point rounding style
FloatRoundStyle RoundBigB_ = RoundBigA_,
/// Floating-point rounding style
FloatRoundStyle RoundSmallB_ = RoundSmallA_,
/// Precision for TensorFloat32Op
// (k3xTF32: BigxBig, BigxSmall, SmallxBig)
// (k4xTF32: BigxBig, BigxSmall, SmallxBig, SmallxSmall)
TensorFloat32Op Precision_ = TensorFloat32Op::k3xTF32
>
struct FastF32 {
static FloatRoundStyle const kRoundBigA = RoundBigA_;
static FloatRoundStyle const kRoundSmallA = RoundSmallA_;
static FloatRoundStyle const kRoundBigB = RoundBigB_;
static FloatRoundStyle const kRoundSmallB = RoundSmallB_;
static TensorFloat32Op const kPrecision = Precision_;
};
namespace detail {
template<
int N,
FloatRoundStyle RoundBig = FloatRoundStyle::round_toward_zero,
FloatRoundStyle RoundSmall = FloatRoundStyle::round_half_ulp_truncate
>
struct ConvertAndPackAccurateF32 {
/// Rounding styles for big and small part
static FloatRoundStyle const kRoundBig = RoundBig;
static FloatRoundStyle const kRoundSmall = RoundSmall;
/// Converter type
using Converter = NumericConverterFastF32<kRoundBig, kRoundSmall>;
/// Source fragment
using SourceFragment = Array<float, N>;
/// Destination fragment
using DestinationFragment = Array<tfloat32_t, N>;
/// Converter Fragment holding two tfloat32_t elements for every float
using ConverterFragment = Array<tfloat32_t, 2>;
/// Indices into the fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
CUTLASS_HOST_DEVICE
void operator()(SourceFragment const &source,
DestinationFragment &dst_big,
DestinationFragment &dst_small) {
Converter convert_;
ConverterFragment result_;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
// convert source to result fragment
result_ = convert_(source[i]);
// store converted result fragments to destination fragment
dst_big[i] = result_[kBigIndex];
dst_small[i] = result_[kSmallIndex];
}
}
};
} // namespace detail
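// Illustrative use of detail::ConvertAndPackAccurateF32 (fragment size is an
// example choice). It splits a packed float fragment into the "big" and
// "small" tfloat32_t fragments consumed by the fast-F32 MMA below:
//
//   detail::ConvertAndPackAccurateF32<8> convert;
//   Array<float, 8> src;               // source operand fragment
//   Array<tfloat32_t, 8> big, small;   // destination fragments
//   convert(src, big, small);          // float(big[i]) + float(small[i]) ~= src[i]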
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_ = 1,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaTensorOpFastF32;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float*float+float => float using TF32 TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Used for partial specialization
typename Enable
>
class MmaTensorOpFastF32<
Shape_,
float, LayoutA_,
float, LayoutB_,
float, LayoutC_,
Policy_, PartitionsK_,
AccumulatorsInRowMajor, Enable> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = float;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = float;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = float;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddFastF32;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// Tunes the F32-to-TF32 big/small conversion for float operations.
/// Different combinations of big/small rounding styles trade off speed
/// against accuracy. In general, round_half_ulp_truncate improves
/// performance but hurts accuracy.
using MmaFastF32 = FastF32 <
FloatRoundStyle::round_toward_zero, // kRoundBigA
FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallA
FloatRoundStyle::round_toward_zero, // kRoundBigB
FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallB
TensorFloat32Op::k3xTF32 // Number of TF32 operations
>;
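// Sketch of the big/small decomposition behind the rounding choices above
// (notation is illustrative). Each fp32 operand x is split into two
// tfloat32_t values,
//
//   x_big   = tf32(x)                  // round toward zero
//   x_small = tf32(x - float(x_big))   // residual, round_half_ulp_truncate
//
// so x ~= float(x_big) + float(x_small), and a product expands as
//
//   a * b ~= a_big*b_big + a_big*b_small + a_small*b_big (+ a_small*b_small)
//
// k3xTF32 keeps the first three terms; k4xTF32 also accumulates the last.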
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
kThreadCount,
kPartitionsK
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements * 2>;
/// Fragment bisecting big and small sections
using AccessTypeFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kRow,
kThreadCount,
kPartitionsK
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements * 2>;
/// Fragment bisecting big and small sections
using AccessTypeFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements>;
/// Indices into the fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC,
typename ArchMmaOperator::Shape, typename Policy::OpDelta>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM,
(Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN
>;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaTensorOpFastF32() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C
) const {
AccessTypeFragmentA const *ptr_A = reinterpret_cast<AccessTypeFragmentA const*>(&A);
AccessTypeFragmentB const *ptr_B = reinterpret_cast<AccessTypeFragmentB const*>(&B);
//
// Accumulate in place
//
D = C;
mma_operator(D, ptr_A[kSmallIndex], ptr_B[kBigIndex], D);
mma_operator(D, ptr_A[kBigIndex], ptr_B[kSmallIndex], D);
mma_operator(D, ptr_A[kBigIndex], ptr_B[kBigIndex], D);
if (MmaFastF32::kPrecision == TensorFloat32Op::k4xTF32)
mma_operator(D, ptr_A[kSmallIndex], ptr_B[kSmallIndex], D);
}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void mma_operator(
FragmentC &D,
AccessTypeFragmentA const &A,
AccessTypeFragmentB const &B,
FragmentC const &C
) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
// Serpentine visitation order maximizing reuse of Ra
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// This allows reuse of Rb at the serpentine turns
int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n);
if (AccumulatorsInRowMajor) { // matrix B is reordered
mma(
ptr_D[n_serpentine + m * MmaIterations::kColumn],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[n_serpentine + m * MmaIterations::kColumn]);
} else {
mma(
ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[m + n_serpentine * MmaIterations::kRow]);
}
} // end n loop
} // end m loop
#else
assert(0);
#endif
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
//
// Define conversions from source type to instruction type
//
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
detail::ConvertAndPackAccurateF32<
FragmentA::kElements / 2,
MmaFastF32::kRoundBigA,
MmaFastF32::kRoundSmallA> convert_A;
detail::ConvertAndPackAccurateF32<
FragmentB::kElements,
MmaFastF32::kRoundBigB,
MmaFastF32::kRoundSmallB> convert_B;
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements> *ptr_dst_B =
reinterpret_cast<Array<typename ArchMmaOperator::ElementB, FragmentB::kElements> *>(&dst_B);
convert_B(B, ptr_dst_B[0], ptr_dst_B[1]);
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *ptr_dst_A =
reinterpret_cast<Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *>(&dst_A);
Array<ElementA, FragmentA::kElements / 2> const *ptr_A =
reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> const *>(&A);
convert_A(ptr_A[0], ptr_dst_A[0], ptr_dst_A[2]);
convert_A(ptr_A[1], ptr_dst_A[1], ptr_dst_A[3]);
#else
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h",
"repo_id": "include",
"token_count": 5508
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a canonical coordinate for rank=2 matrices offering named indices.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// MatrixCoord wraps Coord<2, int> to provide a helper for accessing named dimensions. Classes
/// expecting a coordinate in the rank=2 index space of a matrix should use MatrixCoord.
struct MatrixCoord : public Coord<2, int> {
public:
/// Integer-valued index
using Index = int;
/// Base type is a Coord of rank=2
using Base = Coord<2, Index>;
/// LongIndex type
using LongIndex = typename Base::LongIndex;
private:
/// Rows dimension
static int const kRow = 0;
/// Columns dimension
static int const kColumn = 1;
public:
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
MatrixCoord() { }
/// Constructs from Coord<2>
CUTLASS_HOST_DEVICE
MatrixCoord(Coord<2, Index> const &coord): Base(coord) { }
/// Helper to construct from a row and column
CUTLASS_HOST_DEVICE
MatrixCoord(Index row, Index column): Base(make_Coord(row, column)) { }
/// Helper to construct from a row and column, which are LongIndex based
CUTLASS_HOST_DEVICE
MatrixCoord(LongIndex row, LongIndex column): Base(make_Coord(Index(row), Index(column))) { }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index const & row() const { return this->at(kRow); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index & row() { return this->at(kRow); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index const & column() const { return this->at(kColumn); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index & column() { return this->at(kColumn); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
MatrixCoord operator+(Base const& b) const {
return MatrixCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
MatrixCoord operator-(Base const& b) const {
return MatrixCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
MatrixCoord operator*(Base const& b) const {
return MatrixCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
MatrixCoord operator/(Base const& b) const {
return MatrixCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
MatrixCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
MatrixCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
MatrixCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
MatrixCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
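// Illustrative usage sketch (values below are example choices only):
//
//   cutlass::MatrixCoord extent(128, 64);          // 128 rows, 64 columns
//   cutlass::MatrixCoord tile(32, 16);
//
//   cutlass::MatrixCoord residue = extent - tile;  // element-wise: (96, 48)
//   residue += tile;                               // back to (128, 64)
//
//   int rows = extent.row();                       // 128
//   int cols = extent.column();                    // 64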
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/matrix_coord.h/0 | {
"file_path": "include/cutlass/matrix_coord.h",
"repo_id": "include",
"token_count": 1488
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a final reduction for softmax
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace kernel {
template <
typename ElementNorm_,
typename ElementSum_,
typename ElementSoftmaxCompute_,
typename ThreadblockShape_,
bool GroupedProblem = false
>
class ApplySoftmaxFinalReduction {
public:
using ElementNorm = ElementNorm_;
using ElementSum = ElementSum_;
using ElementSoftmaxCompute = ElementSoftmaxCompute_;
using ThreadblockShape = ThreadblockShape_;
static const bool isGroupedProblem = GroupedProblem;
//
// Arguments
//
struct Arguments {
cutlass::gemm::GemmCoord* problem_sizes{nullptr};
cutlass::gemm::GemmCoord problem_size{};
ElementNorm* block_Norm{nullptr};
ElementSum* block_Sum{nullptr};
int64_t* offset_Norm_Device{nullptr};
int64_t* offset_Sum_Device{nullptr};
int64_t batch_stride_Max{0};
int64_t batch_stride_Sum{0};
//
// Methods
//
Arguments() { }
// Non-grouped constructor without batching
Arguments(
cutlass::gemm::GemmCoord problem_size,
ElementNorm* block_Norm,
ElementSum* block_Sum
):
problem_size(problem_size),
block_Norm(block_Norm),
block_Sum(block_Sum),
problem_sizes(nullptr),
offset_Norm_Device(nullptr),
offset_Sum_Device(nullptr),
batch_stride_Max(0),
batch_stride_Sum(0)
{
}
// Non-grouped constructor with batching
Arguments(
cutlass::gemm::GemmCoord problem_size,
ElementNorm* block_Norm,
ElementSum* block_Sum,
int64_t batch_stride_Max,
int64_t batch_stride_Sum
):
problem_size(problem_size),
block_Norm(block_Norm),
block_Sum(block_Sum),
batch_stride_Max(batch_stride_Max),
batch_stride_Sum(batch_stride_Sum),
problem_sizes(nullptr),
offset_Norm_Device(nullptr),
offset_Sum_Device(nullptr)
{
}
// Grouped constructor
Arguments(
cutlass::gemm::GemmCoord *problem_sizes,
ElementNorm* block_Norm,
ElementSum* block_Sum,
int64_t* offset_Norm_Device,
int64_t* offset_Sum_Device
):
problem_sizes(problem_sizes),
problem_size(cutlass::gemm::GemmCoord(0, 0, 0)),
block_Norm(block_Norm),
block_Sum(block_Sum),
offset_Norm_Device(offset_Norm_Device),
offset_Sum_Device(offset_Sum_Device)
{
}
};
struct SharedStorage {
};
//
// Params struct
//
struct Params {
Arguments args;
//
// Methods
//
Params() { }
Params(Arguments const &args_): args(args_) { }
};
private:
public:
CUTLASS_DEVICE
ApplySoftmaxFinalReduction() { }
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
apply(params, shared_storage);
}
private:
/// Full reduction
CUTLASS_DEVICE
void apply(Params const ¶ms, SharedStorage &shared_storage) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int block_batch = blockIdx.z;
// Define the loop bounds and offsets shared by the grouped and non-grouped reduction paths
cutlass::gemm::GemmCoord problem_size = isGroupedProblem ? params.args.problem_sizes[bid] : params.args.problem_size;
int m_dim_in_loop = isGroupedProblem ? problem_size.m() : tid + bdim;
int access_offset = isGroupedProblem ? 0 : bid * bdim;
if (!isGroupedProblem && access_offset + tid >= problem_size.m()) return;
ElementNorm *curr_ptr_Max = isGroupedProblem ? \
params.args.block_Norm + params.args.offset_Norm_Device[bid] : \
params.args.block_Norm + block_batch * params.args.batch_stride_Max;
ElementSum *curr_ptr_Sum = isGroupedProblem ? \
params.args.block_Sum + params.args.offset_Sum_Device[bid] : \
params.args.block_Sum + block_batch * params.args.batch_stride_Sum;
int threadblock_num = (problem_size.n() + ThreadblockShape::kN - 1) / ThreadblockShape::kN;
using ConvertSumOutput = cutlass::NumericConverter<ElementSum, ElementSoftmaxCompute>;
using ConvertNormOutput = cutlass::NumericConverter<ElementNorm, ElementSoftmaxCompute>;
using ConvertSum = cutlass::NumericConverter<ElementSoftmaxCompute, ElementSum>;
using ConvertNorm = cutlass::NumericConverter<ElementSoftmaxCompute, ElementNorm>;
ConvertSum convert_sum;
ConvertNorm convert_norm;
ConvertSumOutput convert_sum_output;
ConvertNormOutput convert_norm_output;
uint32_t float_max_bits = 0xff7fffff;
float min_float = reinterpret_cast<float const &>(float_max_bits);
CUTLASS_PRAGMA_UNROLL
for (int idx_m = tid; idx_m < m_dim_in_loop; idx_m += bdim) {
ElementNorm *access_n = curr_ptr_Max + idx_m + access_offset;
ElementSum *access_s = curr_ptr_Sum + idx_m + access_offset;
ElementNorm *access_n_bak = access_n;
ElementSum *access_s_bak = access_s;
ElementSoftmaxCompute max_val = ElementSoftmaxCompute(min_float);
ElementSoftmaxCompute sum_val = ElementSoftmaxCompute(0);
ElementNorm fetch_n;
ElementSum fetch_s;
CUTLASS_PRAGMA_UNROLL
for (int idx_n = 0; idx_n < threadblock_num; idx_n++) {
cutlass::arch::global_load<ElementNorm, sizeof(ElementNorm)>(fetch_n, access_n, true);
max_val = cutlass::fast_max(max_val, convert_norm(fetch_n));
access_n += problem_size.m();
}
access_n = access_n_bak;
CUTLASS_PRAGMA_UNROLL
for (int idx_n = 0; idx_n < threadblock_num; idx_n++) {
cutlass::arch::global_load<ElementNorm, sizeof(ElementNorm)>(fetch_n, access_n, true);
cutlass::arch::global_load<ElementSum, sizeof(ElementSum)>(fetch_s, access_s, true);
sum_val += convert_sum(fetch_s) * cutlass::fast_exp(convert_norm(fetch_n) - max_val);
access_n += problem_size.m();
access_s += problem_size.m();
}
ElementSoftmaxCompute inv_sum = cutlass::constants::one<ElementSoftmaxCompute>() / sum_val;
access_n = access_n_bak;
access_s = access_s_bak;
access_n[0] = convert_norm_output(max_val);
access_s[0] = convert_sum_output(inv_sum);
}
}
};
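// Summary of what apply() computes per row, written out for reference
// (notation is illustrative). Given per-threadblock partial results
// (m_i, s_i) for one row, where m_i is a partial max and s_i a partial sum
// of exponentials:
//
//   M = max_i m_i
//   S = sum_i s_i * exp(m_i - M)
//
// The kernel overwrites the first partial entries with M and 1/S, which a
// softmax epilogue can then apply per element as exp(x - M) * (1/S).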
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
| include/cutlass/reduction/kernel/reduce_softmax_final.h/0 | {
"file_path": "include/cutlass/reduction/kernel/reduce_softmax_final.h",
"repo_id": "include",
"token_count": 3398
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a matrix object intended for storing data in registers and operations within
a CUDA thread.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_coord.h"
namespace cutlass {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Per-thread matrix object storing a packed matrix
template <
typename Element_,
int Rows,
int Columns,
typename Layout_ = layout::RowMajor
>
class Matrix : public Array<Element_, Rows * Columns> {
public:
// Verify layout refers to a rank=2 matrix.
static_assert(
Layout_::kRank == 2,
"Layout type must refer to a rank=2 matrix");
/// Base type
using Base = Array<Element_, Rows * Columns>;
/// Element type
using Element = Element_;
/// Number of rows
static int const kRows = Rows;
/// Number of columns
static int const kColumns = Columns;
/// Layout within the array
using Layout = Layout_;
/// Reference type to an element
using Reference = Element &;
/// Logical rank of tensor index space
static int const kRank = 2;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Stride type
using Stride = typename Layout::Stride;
/// TensorRef to matrix object
using TensorRef = cutlass::TensorRef<Element, Layout>;
/// TensorRef to constant matrix object
using ConstTensorRef = typename TensorRef::ConstTensorRef;
/// TensorView to matrix object
using TensorView = cutlass::TensorView<Element, Layout>;
/// TensorView to constant matrix object
using ConstTensorView = typename TensorView::ConstTensorView;
/// Diagonal vector, stored as a packed Array of min(kRows, kColumns) elements
using Diagonal = Array<Element, (kRows < kColumns ? kRows : kColumns)>;
private:
public:
//
// Methods
//
/// Returns the size of the object
CUTLASS_HOST_DEVICE
static MatrixCoord extent() {
return make_Coord(kRows, kColumns);
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
static Layout layout() {
return Layout::packed(extent());
}
/// Ctor
CUTLASS_HOST_DEVICE
Matrix() { }
/// Ctor
CUTLASS_HOST_DEVICE
Matrix(Diagonal const &diag) {
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
TensorRef ref() {
return TensorRef(this->data(), layout());
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
ConstTensorRef const_ref() const {
return ConstTensorRef(this->data(), layout());
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
TensorView view() {
return TensorView(ref(), extent());
}
/// Returns a TensorView to const data
CUTLASS_HOST_DEVICE
ConstTensorView const_view() const {
return ConstTensorView(const_ref(), extent());
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(MatrixCoord const& coord) {
typename Base::size_type offset_(layout().offset(coord));
return Base::at(offset_);
}
/// Returns the number of scalar elements needed to store tensor.
CUTLASS_HOST_DEVICE
LongIndex capacity() const {
return LongIndex(Base::size());
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Column vector defined as a matrix with exactly one column
template <
typename Element,
int Rows,
typename Layout = layout::ColumnMajor
>
using ColumnVector = Matrix<Element, Rows, 1, Layout>;
/// Row vector defined as a matrix with exactly one row
template <
typename Element,
int Columns,
typename Layout = layout::RowMajor
>
using RowVector = Matrix<Element, 1, Columns, Layout>;
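// The commented sketch below illustrates typical use of these per-thread matrix objects in device
// code. It is illustrative only: the element type, extents, and the enclosing function are
// assumptions rather than part of this header.
//
//   __device__ void example() {
//
//     cutlass::thread::Matrix<float, 2, 3> m;      // 2x3 row-major matrix held in registers
//     m.at({0, 2}) = 1.5f;                         // element access via a MatrixCoord
//
//     cutlass::thread::RowVector<float, 3> v;      // a 1x3 matrix
//     v.at({0, 0}) = m.at({0, 2});
//
//     auto view = m.view();                        // TensorView spanning the packed storage
//     (void)view;
//   }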
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace cutlass
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile
first, with the objective of minimizing predicate mask updates during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h"
#include "cutlass/transform/thread/transpose.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileIterator2dThreadTile
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize register liveness
/// and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is constructed.
/// Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator is constructed.
/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be partially full in
/// both the advance dimension and the steady-state dimension. This is assumed to be the last
/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to
/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent
/// accesses may be performed without updating internal predicates and are efficient in terms of
/// live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once
/// outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing
/// the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
// typename Iterator::Params params,
// typename Iterator::Element *ptr,
// TensorCoord extent) {
//
// typename Iterator::Fragment fragment;
//
// TensorCoord threadblock_offset(0, 0);
//
//      Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//
// fragment = *iter; // load "residue" tile first
// ++iter; // advance to first "steady state" tile and update internal masks
//
//
// #pragma unroll
// for (int i = Remaining - 1; i >= 0; --i) {
//
// f(fragment);
//
// if (!i) {
// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs.
// }
//
// fragment = *iter; // load tile during "steady state" phase
// ++iter; // advance to next tile - lightweight due to steady-state masks
// }
// }
//
// void host(TensorView<Element, 2, layout::PitchLinear> view) {
//
// using Iterator = transform::threadblock::PredicatedTileIterator2dThreadTile;
//
// typename Iterator::Params params(view.layout());
//
//      kernel<Iterator>(params, view.data(), view.extent());
// }
///
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
bool Transpose = false
>
class PredicatedTileIterator2dThreadTile;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, bool Transpose_>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
  /// extra set of parentheses is needed for VS compiler
struct alignas((ThreadMap::kElementsPerAccess * sizeof_bits<Element>::value /
8)) AccessType {
Array<Element, ThreadMap::kElementsPerAccess> storage;
static int const kElements = ThreadMap::kElementsPerAccess;
};
  /// Optionally, this fragment can be 4x4 transposed
using Transform = thread::Transpose< ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount , layout::PitchLinearShape<4,4>, Element>;
static bool const transpose = Transpose_;
/// Underlying iterator to compute the addresses
using TileAccessIterator =
PredicatedTileAccessIterator2dThreadTile<Shape, Element, Layout, kAdvanceRank,
ThreadMap, AccessType>;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
using Base = typename TileAccessIterator::Params::Base;
friend PredicatedTileIterator2dThreadTile;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) { }
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Base const &base)
: params_(base) {}
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset) {}
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset({0, 1});
else
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++){
int access_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + \
s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
address_iterator_.set_iteration_index(access_idx);
if (address_iterator_.valid()) {
frag_ptr[access_idx] =
*(address_iterator_.get() + pointer_offset);
}
++address_iterator_;
}
}
}
if (transpose) {
Transform t;
t.transform(frag, frag);
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++){
int access_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + \
s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
address_iterator_.set_iteration_index(access_idx);
if (address_iterator_.valid()) {
*(address_iterator_.get() + pointer_offset) = frag_ptr[access_idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
bool Transpose_
>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static bool const Transpose = Transpose_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
Transpose
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())
) { }
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
bool Transpose_
>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static bool const Transpose = Transpose_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
Transpose
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) { }
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())
) { }
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines an unsigned 128b integer with several operators to support 64-bit integer division.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <cstdint>
#include <cstdlib>
#include <cmath>
#include <type_traits>
#include <stdexcept>
#endif
#include "cutlass/cutlass.h"
/// Optionally enable GCC's built-in type
#if (defined(__x86_64) || defined (__aarch64__)) && !(defined(__CUDA_ARCH__) && ((__CUDACC_VER_MAJOR__ <= 10) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ <= 4)))) && defined(__GNUC__)
#define CUTLASS_UINT128_NATIVE
#elif defined(_MSC_VER) && defined(_M_AMD64) && !(defined(__CUDA_ARCH__) && ((__CUDACC_VER_MAJOR__ <= 10) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ <= 4))))
#define CUTLASS_INT128_ARITHMETIC
#include <intrin.h>
#if _MSC_VER >= 1920 && !defined(__CUDA_ARCH__)
#define CUTLASS_INT128_ARITHMETIC_DIV
#include <immintrin.h>
#endif
#endif
namespace cutlass {
///! Unsigned 128b integer type
struct alignas(16) uint128_t
{
/// Size of one part of the uint's storage in bits
static constexpr int storage_bits_ = 64;
struct hilo
{
uint64_t lo;
uint64_t hi;
};
// Use a union to store either low and high parts or, if present, a built-in 128b integer type.
union {
struct hilo hilo_;
#if defined(CUTLASS_UINT128_NATIVE)
unsigned __int128 native;
#endif // defined(CUTLASS_UINT128_NATIVE)
};
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
uint128_t() : hilo_{0, 0} {}
/// Constructor from uint64
CUTLASS_HOST_DEVICE
uint128_t(uint64_t lo_) : hilo_{lo_, 0} {}
/// Constructor from two 64b unsigned integers
CUTLASS_HOST_DEVICE
uint128_t(uint64_t lo_, uint64_t hi_) : hilo_{lo_, hi_} {}
/// Optional constructor from native value
#if defined(CUTLASS_UINT128_NATIVE)
uint128_t(unsigned __int128 value) : native(value) { }
#endif
/// Lossily cast to uint64
CUTLASS_HOST_DEVICE
explicit operator uint64_t() const
{
return hilo_.lo;
}
CUTLASS_HOST_DEVICE
static void exception()
{
#if defined(__CUDA_ARCH__)
asm volatile (" brkpt;\n");
#else
// throw std::runtime_error("Not yet implemented.");
abort();
#endif
}
/// Add
CUTLASS_HOST_DEVICE
uint128_t operator+(uint128_t const& rhs) const
{
uint128_t y{};
#if defined(CUTLASS_UINT128_NATIVE)
y.native = native + rhs.native;
#else
y.hilo_.lo = hilo_.lo + rhs.hilo_.lo;
y.hilo_.hi = hilo_.hi + rhs.hilo_.hi + (y.hilo_.lo < hilo_.lo);
#endif
return y;
}
/// Subtract
CUTLASS_HOST_DEVICE
uint128_t operator-(uint128_t const& rhs) const
{
uint128_t y{};
#if defined(CUTLASS_UINT128_NATIVE)
y.native = native - rhs.native;
#else
y.hilo_.lo = hilo_.lo - rhs.hilo_.lo;
y.hilo_.hi = hilo_.hi - rhs.hilo_.hi - (rhs.hilo_.lo && y.hilo_.lo > hilo_.lo);
#endif
return y;
}
/// Multiply by unsigned 64b integer yielding 128b integer
CUTLASS_HOST_DEVICE
uint128_t operator*(uint64_t const& rhs) const
{
uint128_t y{};
#if defined(CUTLASS_UINT128_NATIVE)
y.native = native * rhs;
#elif defined(CUTLASS_INT128_ARITHMETIC)
// Multiply by the low part
y.hilo_.lo = _umul128(hilo_.lo, rhs, &y.hilo_.hi);
// Add the high part and ignore the overflow
uint64_t overflow{0};
y.hilo_.hi += _umul128(hilo_.hi, rhs, &overflow);
#else
CUTLASS_UNUSED(rhs);
exception();
#endif
return y;
}
  /// Divides a 128b operand by a 64b operand, yielding a 64b quotient
CUTLASS_HOST_DEVICE
uint64_t operator/(uint64_t const& divisor) const
{
uint64_t quotient{0};
#if defined(CUTLASS_UINT128_NATIVE)
quotient = uint64_t(native / divisor);
#elif defined(CUTLASS_INT128_ARITHMETIC_DIV)
// implemented using MSVC's arithmetic intrinsics
uint64_t remainder{0};
quotient = _udiv128(hilo_.hi, hilo_.lo, divisor, &remainder);
#else
CUTLASS_UNUSED(divisor);
exception();
#endif
return quotient;
}
  /// Divides a 128b operand by a 64b operand, yielding a 64b remainder
CUTLASS_HOST_DEVICE
uint64_t operator%(uint64_t const& divisor) const
{
uint64_t remainder{0};
#if defined(CUTLASS_UINT128_NATIVE)
remainder = uint64_t(native % divisor);
#elif defined(CUTLASS_INT128_ARITHMETIC_DIV)
// implemented using MSVC's arithmetic intrinsics
(void)_udiv128(hilo_.hi, hilo_.lo, divisor, &remainder);
#else
CUTLASS_UNUSED(divisor);
exception();
#endif
return remainder;
}
/// Computes the quotient and remainder in a single method.
CUTLASS_HOST_DEVICE
uint64_t divmod(uint64_t &remainder, uint64_t divisor) const
{
uint64_t quotient{0};
#if defined(CUTLASS_UINT128_NATIVE)
quotient = uint64_t(native / divisor);
remainder = uint64_t(native % divisor);
#elif defined(CUTLASS_INT128_ARITHMETIC_DIV)
// implemented using MSVC's arithmetic intrinsics
quotient = _udiv128(hilo_.hi, hilo_.lo, divisor, &remainder);
#else
CUTLASS_UNUSED(remainder);
CUTLASS_UNUSED(divisor);
exception();
#endif
return quotient;
}
/// Left-shifts a 128b unsigned integer
CUTLASS_HOST_DEVICE
uint128_t operator<<(int sh) const
{
if (sh == 0) {
return *this;
}
else if (sh >= storage_bits_) {
return uint128_t(0, hilo_.lo << (sh - storage_bits_));
}
else {
return uint128_t(
(hilo_.lo << sh),
(hilo_.hi << sh) | uint64_t(hilo_.lo >> (storage_bits_ - sh))
);
}
}
/// Right-shifts a 128b unsigned integer
CUTLASS_HOST_DEVICE
uint128_t operator>>(int sh) const
{
if (sh == 0) {
return *this;
}
else if (sh >= storage_bits_) {
return uint128_t((hilo_.hi >> (sh - storage_bits_)), 0);
}
else {
return uint128_t(
(hilo_.lo >> sh) | (hilo_.hi << (storage_bits_ - sh)),
(hilo_.hi >> sh)
);
}
}
};
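// The commented sketch below illustrates typical use of uint128_t; the operand values are
// arbitrary and chosen only for illustration.
//
//   cutlass::uint128_t product = cutlass::uint128_t(0xffffffffffffffffull) * 10ull;  // 128b = 64b x 64b
//
//   uint64_t remainder = 0;
//   uint64_t quotient = product.divmod(remainder, 7ull);  // quotient = product / 7, remainder = product % 7
//
//   cutlass::uint128_t shifted = product >> 32;           // shifts span both 64b halves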
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
[README](../../README.md#documentation) > **CUTLASS 3.0 GEMM Backwards Compatibility**
# CUTLASS 3.0 GEMM Backwards Compatibility
Although CUTLASS 3.0 restructures the GEMM hierarchy and introduces new types for the
threadblock layer and below, we intend the entire source code to be usable in user applications.
We expect users to be able to `#include` any source file from CUTLASS 3.0, whether
they implement the 2.x or the 3.x API, without breaking user builds. This means that a single
translation unit should be able to contain any valid kernel regardless of its API version. The
sections below discuss how `device` and `kernel` layer type names are made compatible across the
two API versions, and what the users can expect out of the `threadblock` layer API going forward.
## Compatible Device API
The entry point for CUTLASS's Device GEMM API
is the class
`cutlass::gemm::device::GemmUniversalAdapter`.
This class lives in the header file
[include/cutlass/gemm/device/gemm_universal_adapter.h](/include/cutlass/gemm/device/gemm_universal_adapter.h).
`GemmUniversalAdapter` is a "universal adapter"
and serves as a common device interface
for both CUTLASS 3.x and CUTLASS 2.x kernels.
Its template parameter `GemmKernel`,
the GEMM kernel type, can be any of the following:
* `cutlass::gemm::kernel::GemmUniversal`,
implementing CUTLASS 3.x API kernels (declared in `gemm_universal.hpp`);
* `cutlass::gemm::kernel::GemmUniversal`,
implementing CUTLASS 2.x API kernels (declared in `gemm_universal.h`);
* Any valid CUTLASS 2.x `kernel` layer GEMM that
was previously composable with `device::GemmUniversalAdapter`
Users implementing new kernels in either API should prefer
using `kernel::GemmUniversal` as the kernel type
and compose it with `device::GemmUniversalAdapter`.
Users with existing `kernel::Gemm` kernels
can continue to use them as template arguments
of `device::GemmUniversalAdapter`. They can adopt
`GemmUniversal` as a gradual migration path,
since `GemmUniversal` accepts either 3.0 or 2.x collectives.
Please see the [next section for `kernel::GemmUniversal`](#compatible-kernel-api) for details.
`GemmUniversalAdapter` presents a single
host-side interface to both 3.0 and 2.x kernels.
CUTLASS accomplishes this by
specializing `GemmUniversalAdapter`'s implementation
on kernel layer GEMMs implementing either the 2.x or the 3.x API
(as detected by `gemm::detail::IsCutlass3GemmKernel`
discussed below). As a result, `GemmUniversalAdapter`'s behavior
might differ between the two specializations.
### Device API design differences
In CUTLASS 2.x, the Device API was more closely tied
to the Kernel API. In CUTLASS 3.0, the Device API
accepts any kernel type that meets the Kernel API
interface requirements. CUTLASS 3.0's Device API code is
parameterized by the kernel type, but this code
is *generic*; the same code works for any kernel type.
The device layer compatibility interface, `device::GemmUniversalAdapter`,
also provides reflective mappings from 3.0-specific types
back to the closest possible 2.x equivalent types. This is [discussed further in the section below](#conversions-between-2x-tags-and-30-types).
CUTLASS 3.0's `device::GemmUniversalAdapter` also exposes some new APIs that the 2.x `device::GemmUniversalAdapter` implementation does not. Most notably, this includes the ability to bypass the `GemmKernel::Arguments` to `GemmKernel::Params` lowering.
```c++
// Primary run() entry point API that is static allowing users to create and manage their own params.
static Status
run(Params& params, cudaStream_t stream = nullptr);
```
This new API is useful for the following scenarios; a usage sketch follows the list.
* Running again does not require reinvoking `GemmKernel::to_underlying_arguments()`
* Manual control over construction of `GemmKernel::Params` for custom kernels with custom stride types
* Fully static problem shapes and strides for bespoke kernels where no argument mapping needs to take place
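The sketch below illustrates the static-params path. It is a sketch under stated assumptions:
`GemmKernel` is a CUTLASS 3.x kernel type, and `args`, `workspace_ptr`, and `stream` are
placeholders supplied by the caller; error handling is omitted.
```c++
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;

// Lower Arguments to Params once on the host ...
typename GemmKernel::Params params =
    GemmKernel::to_underlying_arguments(args, workspace_ptr);

// ... then launch as many times as needed without re-mapping arguments.
cutlass::Status status = Gemm::run(params, stream);
```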
## Compatible Kernel API
CUTLASS 3.x API shares the kernel layer API with CUTLASS 2.x
through the single entry point type `cutlass::gemm::kernel::GemmUniversal`.
All kernel layer GEMMs are viewed as a composition of a collective mainloop
and a collective epilogue.
**`kernel::GemmUniversal` implements both 2.x and 3.x APIs**
The entry point for CUTLASS's kernel API is the class
`cutlass::gemm::kernel::GemmUniversal`.
This class' declaration lives in the header file
[include/cutlass/gemm/kernel/gemm_universal.hpp](/include/cutlass/gemm/kernel/gemm_universal.hpp).
```c++
/*
* Stateless universal device GEMM kernel type that treats GEMM as
* a composition of a collective mainloop and a collective epilogue.
 * SFINAE shims both 2.x and 3.0 API kernels based on ProblemShapeOrThreadblockMma_.
**/
template <
class ProblemShapeOrThreadblockMma_,
class CollectiveMainloopOrEpilogue_,
class CollectiveEpilogueOrThreadblockSwizzle_,
class TileScheduler_ = void,
class Enable = void
>
class GemmUniversal;
```
We call this class "universal" because it can be built
using either the CUTLASS 3.0 or the 2.x mainloops and epilogues.
If `GemmUniversal`'s first template argument
(`ProblemShapeOrThreadblockMma_`) is a `cute::tuple`,
then `GemmUniversal` assumes that
the remaining three template arguments
(the mainloop, epilogue, and grid swizzle)
implement the 3.0 APIs.
Otherwise, `GemmUniversal` assumes that
the remaining three template arguments
implement the 2.x APIs.
All the template arguments must be either
CUTLASS 3.0 or CUTLASS 2.x types. For example,
`GemmUniversal` does not permit using
a 2.x mainloop with a 3.0 collective epilogue.
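As a concrete illustration, a 3.x kernel is typically assembled from collectives and wrapped in
the device adapter as in the sketch below. `CollectiveMainloop` and `CollectiveEpilogue` are
assumed to have been built elsewhere (for example, through the collective builder interfaces).
```c++
using ProblemShape = cute::Shape<int, int, int, int>;   // (M, N, K, L)

using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
    ProblemShape,
    CollectiveMainloop,     // 3.x collective mainloop (assumed defined elsewhere)
    CollectiveEpilogue>;    // 3.x collective epilogue (assumed defined elsewhere)

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
```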
CUTLASS 3.x implements various embodiments of `kernel::GemmUniversal`.
Each kernel layer schedule is specialized
for a GEMM scheduling algorithm and GPU architecture.
Specializations of `kernel::GemmUniversal` for 3.0 APIs live in
any of various `gemm_*.hpp` files in the directory
[include/cutlass/gemm/kernel/](../../include/cutlass/gemm/kernel/).
The specialization to which to dispatch is decided through the dispatch policy's `Schedule` type.
Specializations for 2.x APIs live in the header file
[include/cutlass/gemm/kernel/gemm_universal.h](../../include/cutlass/gemm/kernel/gemm_universal.h).
### Kernel API design differences
The CUTLASS 2.x Kernel API was more closely tied
to the Device API, as we mentioned above.
In particular, the 2.x Device API specified the grid shape
used to launch the Kernel API.
In CUTLASS 3.0, the Kernel API controls its own grid shape,
while the device adapter simply queries the kernel with which it needs to be launched.
This change is required to support various kernel schedules
that may need their own schedule specific grid planning logic.
For example, persistent kernel schedules generally only launch with
as many threadblocks as the number of multiprocessors on the GPU.
All CUTLASS 3 `kernel::GemmUniversal` specializations expose the following (static) API:
```c++
// Returns true if the kernel can execute the provided GEMM arguments.
static bool
can_implement(Arguments const& args);
// Returns a dim3 representing the threadblock shape.
static dim3
get_block_shape();
// Returns a dim3 representing the grid shape in terms of threadblocks.
static dim3
get_grid_shape(Params const& params);
```
The device adapter simply queries the kernel for these three before launching it on the device.
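For illustration, the host-side launch sequence looks roughly like the following sketch;
`params`, `args`, and `stream` are assumed to exist, and error handling is omitted.
```c++
dim3 const block = GemmKernel::get_block_shape();
dim3 const grid  = GemmKernel::get_grid_shape(params);

if (GemmKernel::can_implement(args)) {
  cutlass::device_kernel<GemmKernel><<<grid, block, GemmKernel::SharedStorageSize, stream>>>(params);
}
```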
CUTLASS 3.0 provides a meta-function to detect whether a `cutlass::gemm::kernel::*` implements
the 3.x API or 2.x API:
```c++
// include/cutlass/gemm/gemm.h
namespace cutlass::gemm::detail {
// The following metafunction is used to detect whether a
// `kernel::Gemm` or `kernel::GemmUniversal` implements the CUTLASS 3.x API,
// by checking whether the problem shape type is aliased within.
template <class GemmKernel, class = void>
struct IsCutlass3GemmKernel;
} // namespace cutlass::gemm::detail
```
Users can dispatch their generic code against 2.x and 3.x specializations with
this as a type trait for the kernel API version.
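For example, a host-side helper might branch on this trait as in the sketch below;
`configure_3x_kernel` and `configure_2x_kernel` are hypothetical user functions, not CUTLASS APIs.
```c++
template <class GemmKernel>
void configure() {
  if constexpr (cutlass::gemm::detail::IsCutlass3GemmKernel<GemmKernel>::value) {
    configure_3x_kernel<GemmKernel>();   // 3.x path: collective mainloop / collective epilogue
  }
  else {
    configure_2x_kernel<GemmKernel>();   // 2.x path: threadblock-level Mma / epilogue
  }
}
```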
## Threadblock API and Inner Loops
Much of the CUTLASS 3 GEMM hierarchy for mainloops and inner loops diverges
from that of CUTLASS 2.x. With that also comes the introduction of the
`cutlass::gemm::collective` layer as a direct replacement and a superset
of the 2.x `cutlass::gemm::threadblock` layer. Going forward,
CUTLASS 3.x will discontinue new developments in the following namespaces.
* `cutlass::*::threadblock::*`
* `cutlass::*::warp::*`
* `cutlass::gemm::thread::*`
* `cutlass::arch::*` (except `barrier.h`)
`cutlass::gemm::collective`s are a superset of the threadblock layer where
all new mainloops will be developed. Users should look to the `CollectiveMma` type
if they wish to author custom mainloop code in the 3.x API.
Similarly, for the GEMM inner loops, `cute::MMA_Atom`s replace the
`gemm::warp` and `gemm::thread` layer code. Going forward, all new PTX instructions
and associated metadata development will occur directly inside [`cute/arch/*.hpp`](/include/cute/arch/) and [`cute/atom/*.hpp`](/include/cute/atom/).
The desired inner loop MMA iteration order and tiling can be achieved through careful
selection of the atom layout, value layout, and permutations of the `cute::TiledMma`.
For epilogues, the `cutlass::epilogue::collective` layer replaces `cutlass::epilogue::threadblock`. However, the thread-level epilogue elementwise operations
in `cutlass::epilogue::thread` will continue to be used in 3.x kernels as well, albeit, with
a more idiomatic epilogue vectorization strategy.
[Example 50](/examples/50_hopper_gemm_with_epilogue_swizzle/50_hopper_gemm_with_epilogue_swizzle.cu)
shows how to use 2.x epilogue thread operators with 3.0 API kernels.
## Porting from 2.x to 3.0 API
### CUTLASS 2.x layout tags and CUTLASS 3.0 major modes
CUTLASS 2.x and CUTLASS 3.0 use both
different wording and different types
to describe the permitted layouts
of GEMM's input matrices A and B.
CUTLASS 3.0 does not use the terms "column major"
or "row major" to describe matrix layouts.
Starting with CUTLASS 3.0, adoption of CuTe allows us to decouple
* the coordinate mode order (logical shape) of layouts from
* the index space stride order of the backing storage.
In line with our switch to a conceptual GEMM hierarchy, we view the major modes not from a BLAS-3 perspective.
Rather, we divide the modes into two categories.
* "Inner modes" or "K-modes" are contracted over during the GEMM.
Therefore, they are not present in the output tensor.
* "Outer modes" or "MN-modes" are preserved in the output.
Now, instead of `RowMajor` or `ColumnMajor`, whose major stride depends on whether we are referring to the
A or the B matrix, we uniformly employ the "K major" or "MN major" terminology and enforce the convention of all tensors having the shape `[M/N, K, L]` regardless of which mode is major. That is,
* the input matrix A has shape M x K,
* the input matrix B has shape N x K, and
* the input/output matrices C/D have shape M x N.
Note that this convention for B
differs from the BLAS's GEMM interface,
which specifies that B has shape K x N.
CUTLASS 3.0 uses these names of the modes
to specify which mode of a matrix has stride 1.
For the matrix A,
* "M major" means that the matrix is stride 1
in the M mode, and
* "K major" means that the matrix is stride 1
in the K mode.
For the matrix B,
* "N major" means that the matrix is stride 1
in the N mode (which for B is mode 0,
because the convention is that B is N x K); and
* "K major" means that the matrix is stride 1
in the K mode (which for B is mode 1).
CUTLASS 2.x defines "layout tag" classes
`cutlass::layout::ColumnMajor` and `cutlass::layout::RowMajor`,
that live in the header file
[`cutlass/layout/matrix.h`](/include/cutlass/layout/matrix.h).
The interpretation of these layouts in GEMM
depends on whether they are applied
to the input matrix A or B. For the matrix A, "column major" means
that mode corresponding to M extent has stride 1,
and "row major" means that mode corresponding to K extent has stride 1.
This is the usual computer science definition
of column major and row major for a rank-2 array.
For the matrix B, the opposite holds:
"column major" means that mode corresponding to N extent has stride 1,
and "row major" means that mode corresponding to K extent has stride 1.
Using the convention of `[outer, inner, batch]` mode order for tensor logical shapes
avoids potential confusion with the meaning of column major and row major
changing depending on whether they are applied to A or B.
The table below summarizes our mode order convention and
mapping of 2.x layout tags to corresponding M-major, N-major, or K-major strides.
| Matrix | CUTLASS 2.x layout | 2.x Shape | Logical major mode| 3.x Shape/Stride | Major ordinal |
| --- | --- | --- | --- | --- | --- |
| A | `ColumnMajor` | M x K | M major | M x K x L | 0 (outer) |
| A | `RowMajor` | M x K | K major | M x K x L | 1 (inner) |
| B | `RowMajor` | K x N | N major | N x K x L | 0 (outer) |
| B | `ColumnMajor` | K x N | K major | N x K x L | 1 (inner) |
| C | `ColumnMajor` | M x N | M major | M x N x L | 0 (outer) |
| C | `RowMajor` | M x N | N major | M x N x L | 1 (inner) |
Notice that in CUTLASS 3.0, interpretation of layouts no longer changes based on
whether we are talking about the A or B matrix. M and N major inputs always have a
static size-1 stride in their 0th (outer) mode. Similarly, K major inputs
always contain the static size-1 stride in their 1st mode. This uniformity in stride order
allows us to represent tensor layouts much more cleanly and treat both A and B equally in our interfaces.
See for example the following snippet from our [`kernel/sm70_gemm.hpp`](/include/cutlass/gemm/kernel/sm70_gemm.hpp)
for Ampere kernel schedules.
```c++
// Represent the full tensors
Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); // (m,k,l)
Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); // (n,k,l)
// Get batch slice
Tensor mA_mk = mA_mkl(_,_,get<3>(blk_coord_mnkl)); // (m,k)
Tensor mB_nk = mB_nkl(_,_,get<3>(blk_coord_mnkl)); // (n,k)
// Slice to get the tiles for which this thread block is responsible
Tensor gA = local_tile(mA_mk, blk_shape, take<0,3>(blk_coord_mnkl), Step<_1, X,_1>{}); // (BLK_M,BLK_K,k)
Tensor gB = local_tile(mB_nk, blk_shape, take<0,3>(blk_coord_mnkl), Step< X,_1,_1>{}); // (BLK_N,BLK_K,k)
```
As seen in this snippet, all input tensors have the logical shape `[outer, inner, batch]`,
and the strides could represent either outer or inner
(or any other complex hierarchical stride) major storage.
CuTe layouts always maintain the logical consistency of the coordinate spaces regardless of the strides.
By convention, in CUTLASS 3.0, we treat the M and N mode as the 0th mode,
and K mode as the 1st mode of the stride.
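As a small illustration of this convention, a K-major (2.x `RowMajor`) A tensor can be described
as in the sketch below; `M`, `K`, `L`, and `ptr_A` are assumed to be provided by the caller, and
the exact stride types used inside CUTLASS kernels may differ.
```c++
auto layout_A = cute::make_layout(
    cute::make_shape(M, K, L),                                       // (m,k,l) logical shape
    cute::make_stride(int64_t(K), cute::Int<1>{}, int64_t(M) * K));  // stride-1 in the K mode

auto mA_mkl = cute::make_tensor(cute::make_gmem_ptr(ptr_A), layout_A);  // (m,k,l)
```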
### Conversions between 2.x tags and 3.0 types
Starting with CUTLASS 3.0, all layouts are described using
`cute::Shape` and `cute::Stride` which compose into a `cute::Layout<Shape, Stride>`.
In CUTLASS 2.x, various layout tags such as `cutlass::layout::RowMajor` are used to specialize
template implementations. These tag types only encode information about the tensor strides,
as 2.x layouts did not incorporate any concept of tensor shape in the layout tags themselves.
Users may find a need to convert between CUTLASS 2.x layout tags, and 3.0
CuTe stride types. CUTLASS 3.0 `gemm::collective::CollectiveBuilder` interfaces
also accept these 2.x layout tags as input parameters in their template API as a convenience for users.
At every entry point into CUTLASS 3.0, these tags get converted to their corresponding CuTe Stride type with
metafunctions that best approximate their corresponding `cute::Stride`.
* `cutlass::gemm::detail::TagToStrideA_t<LayoutTag>`
* `cutlass::gemm::detail::TagToStrideB_t<LayoutTag>`
* `cutlass::gemm::detail::TagToStrideC_t<LayoutTag>`
By convention, and to match user expectations, the `cute::Stride` types that these
map onto always contain one static mode corresponding to the layout tag, and two 64-bit
dynamic stride modes corresponding to the minor mode and the batch mode. Batch
mode is included by default as all CUTLASS 3.0 kernels support packed batch-mode GEMMs
out of the box.
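For illustration, the forward conversions look like the sketch below; the commented stride types
are approximate and may vary between releases.
```c++
// 2.x RowMajor A is K major: roughly cute::Stride<int64_t, cute::Int<1>, int64_t>
using StrideA = cutlass::gemm::detail::TagToStrideA_t<cutlass::layout::RowMajor>;

// 2.x ColumnMajor B is K major: roughly cute::Stride<int64_t, cute::Int<1>, int64_t>
using StrideB = cutlass::gemm::detail::TagToStrideB_t<cutlass::layout::ColumnMajor>;
```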
The [`cutlass/gemm/gemm.h#440`](../../include/cutlass/gemm/gemm.h#440)
header file includes functions
that can be useful for converting
from CUTLASS 3.0 `cute::Stride`s back to CUTLASS 2.x layout tags.
* `cutlass::gemm::detail::StrideToLayoutTagA_t<CuteStride>`
* `cutlass::gemm::detail::StrideToLayoutTagB_t<CuteStride>`
* `cutlass::gemm::detail::StrideToLayoutTagC_t<CuteStride>`
These metafunctions take the CuTe Stride as a template parameter and
attempt to find the size-1 stride in the idiomatic M, N, or K modes
to best approximate a corresponding 2.x layout tag type.
Note that this may not work in general for any `cute::Stride`
as the mapping between the stride and tag type is not bijective.
These mapping utilities are kept in a `detail` namespace
as we do not guarantee stability of their implementation.
Their behavior may change in future releases as we add new features.
However, we do expect these type names to remain stable. For users who want
these 2.x reflective types from an assembled kernel with a more stable API,
the specialization of `cutlass::gemm::device::GemmUniversalAdapter`
for CUTLASS 3.0 kernel provides all aliases for all 2.x type aliases
in addition to the layout tags. You can see how they are used in the header file
[`cutlass/gemm/device/gemm_universal_adapter.h`](/include/cutlass/gemm/device/gemm_universal_adapter.h).
Here is an excerpt.
```c++
// Map back to 2.x type as best as possible
using LayoutA = gemm::detail::StrideToLayoutTagA_t<typename GemmKernel::StrideA>;
using LayoutB = gemm::detail::StrideToLayoutTagB_t<typename GemmKernel::StrideB>;
using LayoutC = gemm::detail::StrideToLayoutTagC_t<typename GemmKernel::StrideC>;
using LayoutD = gemm::detail::StrideToLayoutTagC_t<typename GemmKernel::StrideD>;
// Legacy: Assume MultiplyAdd only since we do not use this tag type in 3.0
using MathOperator = cutlass::arch::OpMultiplyAdd;
// If our TiledMMA's instruction thread layout size is larger than 1,
// we know it's a tensorop
using OperatorClass = std::conditional_t<
(cute::size(typename GemmKernel::TiledMma::AtomThrID{}) > 1),
cutlass::arch::OpClassTensorOp, cutlass::arch::OpClassSimt>;
// Assume TiledMma's ShapeMNK is the same as 2.x's ThreadblockShape
using ThreadblockShape = cutlass::gemm::GemmShape<
cute::size<0>(TileShape{}),
cute::size<1>(TileShape{}),
cute::size<2>(TileShape{})>;
using ClusterShape = cutlass::gemm::GemmShape<
cute::size<0>(typename GemmKernel::DispatchPolicy::ClusterShape{}),
cute::size<1>(typename GemmKernel::DispatchPolicy::ClusterShape{}),
cute::size<2>(typename GemmKernel::DispatchPolicy::ClusterShape{})>;
// We get the instruction shape directly from our TiledMma's atom shape
using InstructionShape = cutlass::gemm::GemmShape<
cute::size<0>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}),
cute::size<1>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}),
cute::size<2>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{})>;
static int constexpr kStages = CollectiveMainloop::DispatchPolicy::Stages;
static int const kThreadCount = GemmKernel::MaxThreadsPerBlock;
// Warp shape is not a primary API type in 3.x,
// but we can best approximate it by inspecting the TiledMma
// For this, we make the assumption that we always have 4 warps along M,
// and the rest along N, with none along K. We also always round up
// the warp count to 4 if the tiled mma is smaller than 128 threads.
static constexpr int WarpsInMma = std::max(4, CUTE_STATIC_V(cute::size(typename GemmKernel::TiledMma{})) / 32);
static constexpr int WarpsInMmaM = 4;
static constexpr int WarpsInMmaN = cute::ceil_div(WarpsInMma, WarpsInMmaM);
using WarpCount = cutlass::gemm::GemmShape<WarpsInMmaM, WarpsInMmaN, 1>;
using WarpShape = cutlass::gemm::GemmShape<
CUTE_STATIC_V(cute::tile_size<0>(typename CollectiveMainloop::TiledMma{})) / WarpsInMmaM,
CUTE_STATIC_V(cute::tile_size<1>(typename CollectiveMainloop::TiledMma{})) / WarpsInMmaN,
CUTE_STATIC_V(cute::tile_size<2>(typename CollectiveMainloop::TiledMma{}))>;
// Inspect TiledCopy for A and B to compute the alignment size
static int constexpr kAlignmentA = gemm::detail::get_alignment_count_from_gmem_tiled_copy<
typename CollectiveMainloop::GmemTiledCopyA, ElementA>();
static int constexpr kAlignmentB = gemm::detail::get_alignment_count_from_gmem_tiled_copy<
typename CollectiveMainloop::GmemTiledCopyB, ElementB>();
```
CUTLASS's library and profiler use these reflective interfaces to
obtain the kernel's configuration parameters. Users can use these to approximate the CUTLASS 2.x types
for 3.0 API kernels. However, the reflective interfaces cannot always match the types exactly,
as the mappings are not always bijective.
# Copyright
Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Tile Iterator Concepts")
[README](../../README.md#documentation) > **Tile Iterator Concepts**
# Tile Iterator Concepts
Note: CUTLASS 3.0 deprecates all tile access iterators in favour of CuTe's single
vocabulary type `cute::Tensor`, which is parameterized on `cute::Layout`.
`cute::Tensor`s can therefore be manipulated with the same layout algebra as all CuTe layouts.
This removes the need for bespoke types that encapsulate iterator properties.
The following text thus only applies to legacy CUTLASS 2.x API and related types.
CUTLASS 2.x implements generic algorithms on tiles of matrix or tensors of constant size. These may
be considered as partitions of tensors of infinite size, with a range of partitions accessible
by _tile iterators_.
Various data structures may make operations such as random access to tiles inexpensive,
while data structures may not offer random access at all. For example, iterating over a linked
list of matrices requires sequential traversal. Algorithms implemented in terms of sequences of tiles
should require only the minimum set of operators be defined for tile iterators.
This document describes a set of C++ concepts which may be used to define tile iterators used
by CUTLASS algorithms. ("Concept" here does not refer to a C++20 concept that uses the `concept` keyword.
Rather, it refers to a set of requirements on a type.)
Each concept specifies members and type definitions that a tile iterator
must implement. Frequently, a tile iterator implements several concepts, and its members are
the union of the members from each individual concept. These definitions were inspired by
[Boost "New style" iterator concepts](https://www.boost.org/doc/libs/1_40_0/libs/iterator/doc/new-iter-concepts.html).
The set of all possible combinations of these concepts is quite large; however, most tile iterator
templates can be described by one of a few combinations. The section
[Frequently Used Tile Iterator Concepts](#frequently-used-tile-iterator-concepts) describes several common interfaces used throughout CUTLASS.
## Definitions
**_Base Tile Iterator Concept_.** All tile iterators must describe an _Element_ type as well as a _Shape_.
```c++
/// Base concept for all tile iterators
struct TileIteratorConcept {
using Element; ///< Element type composing tile (concept: numeric type or Array<>)
using Shape; ///< Shape type describing extent of tile. The shape concept depends
/// on iterator implementation.
};
```
**_Contiguous Memory Tile Iterator Concept_.** Iterators over tiles stored arbitrarily within
a contiguous block of data in memory. A linear offset in units of _Element_ may be added to
internally held pointers to 'move' the iterator in memory.
```c++
/// Tile iterator over partitions of a tensor in contiguous memory which may be referenced via a
/// TensorRef object.
struct ContiguousMemoryTileIterator : public TileIteratorConcept {
using Index; ///< index type used to add pointer offsets
/// Adds a linear offset in units of Element to internal pointer(s) into tensor
CUTLASS_DEVICE
void add_pointer_offset(Index pointer_offset);
};
```
**_Readable Tile Iterator Concept_.** Iterators that may be read from define a `Fragment` type holding
each thread's part of the data to be loaded. An explicit `load()` method reads the tile from memory,
and places each thread's part in its `Fragment` object.
```c++
/// Tile iterator capable of loading tiles from memory into fragments
struct ReadableTileIteratorConcept {
using Fragment; ///< fragment object derived from cutlass::Array<Element, N>
CUTLASS_DEVICE
void load(Fragment &frag); ///< loads a fragment from memory
};
```
**_Readable Contiguous Tile Iterator Concept_.** Iterators reading from contiguous memory
support an optional pointer offset that is added to any internally managed pointers before
performing the load. This provides a convenient method to fold an offset in with load
operations.
```c++
/// Union of the following tile iterator concepts:
///
/// - ReadableTileIteratorConcept
/// - ContiguousMemoryTileIterator
///
struct ReadableContiguousTileIteratorConcept :
public ReadableTileIteratorConcept,
public ContiguousMemoryTileIterator {
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset); ///< loads a tile with a linear offset
};
```
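For example, folding a pointer offset into the load lets a single iterator read the same logical tile from two buffers placed a fixed number of elements apart, such as two stages of a double buffer. The following sketch is illustrative only; `Iterator` is assumed to model this concept.
```c++
/// Illustrative sketch only: read the same logical tile from two buffers located
/// `buffer_stride` elements apart (e.g., two stages of a double buffer).
template <typename Iterator>
CUTLASS_DEVICE
void load_two_stages(
    Iterator iter,
    typename Iterator::Fragment &stage0,
    typename Iterator::Fragment &stage1,
    typename Iterator::Index buffer_stride) {

  iter.load(stage0);                                     // stage 0 at the iterator's current position
  iter.load_with_pointer_offset(stage1, buffer_stride);  // stage 1, offset folded into the load
}
```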
**_Writeable Tile Iterator Concept_.** Iterators that may write to memory define a `Fragment` type holding
each thread's part of the data to be written. An explicit `store()` method writes the tile to memory.
```c++
/// Tile iterator capable of storing tiles to memory
struct WriteableTileIteratorConcept {
using Fragment; ///< fragment object derived from cutlass::Array<Element, N>
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag); ///< stores a fragment to memory
};
```
**_Writeable Contiguous Tile Iterator Concept_.** Iterators writing to contiguous memory
support an optional pointer offset that is added to any internally managed pointers before
performing the store operation. This provides a convenient method to fold an offset into the
store.
```c++
/// Union of the following tile iterator concepts:
///
/// - WriteableTileIteratorConcept
/// - ContiguousMemoryTileIterator
///
struct WriteableContiguousTileIteratorConcept :
public WriteableTileIteratorConcept,
public ContiguousMemoryTileIterator {
/// Stores a fragment to memory with an additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store to the tensor
Index pointer_offset); ///< stores a tile with a linear offset
};
```
**_Forward Tile Iterator Concept_.** This concept offers traversal "forward" by one tile in
a pre-defined sequence. Often, this sequence is relevant to the context in which the iterator
was defined, such as along the _K_ dimension of a GEMM operation. Equality operators are defined
to determine whether two iterators point to the same tile.
```c++
/// Tile iterator that may be incremented along a traversal sequence.
struct ForwardTileIteratorConcept {
CUTLASS_DEVICE bool operator==(TileIterator const &it); ///< true if iterators point to same tile, false if otherwise
CUTLASS_DEVICE bool operator!=(TileIterator const &it); ///< false if iterators point to same tile, true if otherwise
CUTLASS_DEVICE ForwardTileIteratorConcept & operator++(); ///< pre-increment - advance to next tile in sequence
CUTLASS_DEVICE ForwardTileIteratorConcept operator++(int); ///< post-increment - advance to next tile in sequence
};
```
**_Bidirectional Tile Iterator Concept_.** This concept permits traversal both forward and backward.
```c++
/// Tile iterator which may be traversed in both directions along a defined sequence.
struct BidirectionalTileIteratorConcept : public ForwardTileIteratorConcept {
CUTLASS_DEVICE
BidirectionalTileIteratorConcept & operator--(); ///< pre-decrement - traverse to previous tile in sequence
CUTLASS_DEVICE
BidirectionalTileIteratorConcept operator--(int); ///< post-decrement - traverse to previous tile in sequence
};
```
**_Random Access Tile Iterator Concept_.** This iterator defines random access operations in the logical
coordinate system of the underlying tensor. Thus, tensors must have a defined _Layout_ with an associated
_TensorCoord_ type describing a logical position within the tensor and a _TensorRef_ reference type.
The iterator may be advanced forward or backward by an offset specified in units of whole tiles along each dimension.
```c++
/// Tile iterator offering random access to tiles in contiguous memory.
struct RandomAccessTileIteratorConcept :
public BidirectionalTileIteratorConcept,
public ContiguousMemoryTileIterator {
using Layout; ///< Layout object mapping logical coordinates to offsets in memory
using TensorRef; ///< Tensor Reference object
using TensorCoord; ///< Logical coordinate in referenced tensor
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
RandomAccessTileIteratorConcept & add_tile_offset(TensorCoord const &tile_offset);
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
RandomAccessTileIteratorConcept & operator+=(TensorCoord const &tile_offset);
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
RandomAccessTileIteratorConcept & operator-=(TensorCoord const &tile_offset);
};
```
**_Readable Random Access Tile Iterator Concept_.** Readable random access iterators
accept an additional tile offset in logical coordinate space when loading fragments.
```c++
/// Loads a fragment with a logical coordinate offset in units of whole tiles.
struct ReadableRandomAccessTileIteratorConcept :
public RandomAccessTileIteratorConcept,
public ReadableTileIteratorConcept {
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset); ///< loads a tile with a logical offset in units of whole tiles
};
```
**_Readable Random Access Contiguous Tile Iterator Concept_.** Readable random access contiguous iterators
additionally accept a pointer offset in units of _Element_, alongside the tile offset, when loading fragments.
```c++
/// Loads a fragment with a logical coordinate offset in units of whole tiles.
struct ReadableRandomAccessContiguousTileIteratorConcept :
public ReadableRandomAccessTileIteratorConcept,
ReadableContiguousTileIteratorConcept {
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset); ///< loads a tile with a logical offset AND a pointer offset
};
```
**_Writeable Random Access Tile Iterator Concept_.** Writeable random access iterators
accept an additional tile offset in logical coordinate space when storing fragments.
```c++
/// Stores a fragment with a logical coordinate offset in units of whole tiles.
struct WriteableRandomAccessTileIteratorConcept :
public RandomAccessTileIteratorConcept,
public WriteableContiguousTileIteratorConcept {
/// Stores a fragment to memory with a logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
Fragment const &frag, ///< fragment to store to the location pointed to by the tensor
TensorCoord const &tile_offset); ///< stores a tile with a given offset from the current iterator
};
```
**_Writeable Random Access Contiguous Tile Iterator Concept_.** Writeable random access contiguous iterators
additionally accept a pointer offset in units of _Element_, alongside the tile offset, when storing fragments.
```c++
/// Stores a fragment with a logical coordinate offset in units of whole tiles.
struct WriteableRandomAccessContiguousTileIteratorConcept :
public WriteableRandomAccessTileIteratorConcept,
public WriteableContiguousTileIteratorConcept {
/// Stores a fragment to memory with a logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
Fragment const &frag, ///< fragment to store to the location pointed to by the tensor
TensorCoord const &tile_offset, ///< stores a tile with a logical offset in units of whole tiles
Index pointer_offset); ///< stores a tile with a logical offset AND a pointer offset
};
```
**_Masked Tile Iterator Concept_.** Matrices and tensors may not always be multiples of whole tiles.
Masked tile iterators define a `Mask` type which may be used to guard accesses to memory. The
semantics and interface of this `Mask` are implementation-defined details of each tile iterator,
but several convenience methods are defined for interacting with the mask such as efficiently
clearing or enabling all guarded memory accesses.
```c++
/// Supports iterating over tiles that are not 'whole' in memory. Iterator maintains a mask object
/// which guards against out-of-bounds access.
///
/// Note, this concept definition does not formally define operations on the mask or methods it
/// supports. These remain implementation-dependent details of iterators implementing this concept.
struct MaskedTileIteratorConcept {
using Mask; ///< mask object used to guard against accesses.
CUTLASS_DEVICE void clear_mask(); ///< efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask(); ///< efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void get_mask(Mask &mask); ///< gets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask); ///< sets the mask
};
```
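For example, a predicated load pipeline may clear the mask once it runs past the last valid tile so that any remaining, speculative loads become no-ops rather than out-of-bounds accesses. The following sketch is illustrative only; `Iterator` is assumed to model the readable, forward, and masked tile iterator concepts.
```c++
/// Illustrative sketch only: load `iterations` tiles, disabling all guarded accesses
/// once the tile index passes the last valid tile.
template <typename Iterator>
CUTLASS_DEVICE
void load_pipeline(
    Iterator iter,
    typename Iterator::Fragment &frag,
    int iterations,
    int valid_tiles) {

  for (int i = 0; i < iterations; ++i) {
    if (i == valid_tiles) {
      iter.clear_mask();   // remaining loads are suppressed instead of reading out of bounds
    }
    iter.load(frag);       // consume `frag` here (e.g., write it to shared memory)
    ++iter;
  }
}
```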
## Frequently Used Tile Iterator Concepts
This section describes several frequently used compositions of the basic tile iterator concepts. They are
listed here as complete type declarations for convenience of the reader.
**_Writeable, Readable, Forward, Contiguous Memory Tile Iterator Concept_.**
This combines several of the basic iterator concepts to
yield a tile iterator capable of loading and storing tiles as well as advancing forward along a traversal sequence.
```c++
/// This tile iterator embodies several of the above:
///
/// - ForwardTileIteratorConcept
/// - ReadableContiguousTileIteratorConcept
/// - WriteableContiguousTileIteratorConcept
///
/// It is restated explicitly for convenience of the reader.
///
struct WriteableReadableForwardContiguousTileIteratorConcept {
//
// Data types
//
using Element; ///< Element type composing tile.
using Shape; ///< Shape type describing extent of tile. The shape concept depends
/// on iterator implementation
using Index; ///< index type used as base for TensorCoord
using Fragment; ///< fragment object derived from cutlass::Array<Element, N>
//
// Methods
//
/// Adds a linear offset in units of Element to internal pointer(s) into tensor
CUTLASS_DEVICE
void add_pointer_offset(Index offset);
/// true if iterators point to same tile, false if otherwise
CUTLASS_DEVICE bool operator==(WriteableReadableForwardContiguousTileIteratorConcept const &it);
///< false if iterators point to same tile, true if otherwise
CUTLASS_DEVICE bool operator!=(WriteableReadableForwardContiguousTileIteratorConcept const &it);
/// pre-increment - traverse to next tile in sequence
CUTLASS_DEVICE
WriteableReadableForwardContiguousTileIteratorConcept &
operator++();
///< post-increment - traverse to next tile in sequence
CUTLASS_DEVICE
WriteableReadableForwardContiguousTileIteratorConcept
operator++(int);
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag); ///< fragment to be loaded from memory
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to be loaded from memory
Index pointer_offset); ///< linear offset (in units of Element) when loading
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag); ///< fragment to store to memory
/// Stores a fragment to memory with an additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store to memory
Index pointer_offset); ///< linear offset (in units of Element) when storing
};
```
**_Writeable, Readable, Random Access, Contiguous Memory Tile Iterator Concept_.**
This combines several of the basic iterator concepts to
yield a tile iterator with random access suitable for loading matrix operands for GEMM.
```c++
/// This tile iterator embodies several of the above:
///
/// - ReadableRandomAccessContiguousTileIteratorConcept
/// - WriteableRandomAccessContiguousTileIteratorConcept
///
/// It is restated explicitly for convenience of the reader.
///
struct WriteableReadableRandomAccessContiguousTileIteratorConcept {
//
// Data types
//
using Element; ///< Element type composing tile.
using Shape; ///< Shape type describing extent of tile. The shape concept depends
/// on iterator implementation
using Layout; ///< Layout object mapping logical coordinates to offsets in memory
using TensorRef; ///< Tensor Reference object
using TensorCoord; ///< Logical coordinate in referenced tensor
using Index; ///< index type used as base for TensorCoord
using Fragment; ///< fragment object derived from cutlass::Array<Element, N>
//
// Methods
//
/// Adds a linear offset in units of Element to internal pointer(s) into tensor
CUTLASS_DEVICE
void add_pointer_offset(Index pointer_offset);
/// true if iterators point to same tile, false if otherwise
CUTLASS_DEVICE bool operator==(WriteableReadableRandomAccessContiguousTileIteratorConcept const &it);
///< false if iterators point to same tile, true if otherwise
CUTLASS_DEVICE bool operator!=(WriteableReadableRandomAccessContiguousTileIteratorConcept const &it);
/// pre-increment - traverse to next tile in sequence
CUTLASS_DEVICE
WriteableReadableRandomAccessContiguousTileIteratorConcept &
operator++();
///< post-increment - traverse to next tile in sequence
CUTLASS_DEVICE
WriteableReadableRandomAccessContiguousTileIteratorConcept
operator++(int);
/// pre-decrement - traverse to previous tile in sequence
CUTLASS_DEVICE
WriteableReadableRandomAccessContiguousTileIteratorConcept &
operator--();
///< post-decrement - traverse to previous tile in sequence
CUTLASS_DEVICE
WriteableReadableRandomAccessContiguousTileIteratorConcept
operator--(int);
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
WriteableReadableRandomAccessContiguousTileIteratorConcept & operator+=(TensorCoord const &tile_offset);
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
WriteableReadableRandomAccessContiguousTileIteratorConcept & operator-=(TensorCoord const &tile_offset);
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag); ///< fragment to be loaded from memory
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to be loaded from memory
Index pointer_offset); ///< linear offset (in units of Element) when loading
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to be loaded from memory
TensorCoord const &tile_offset); ///< loads a tile with a logical offset in units of whole tiles
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to be loaded from memory
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset); ///< loads a tile with a logical offset AND a pointer offset
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag); ///< fragment to store to memory
/// Stores a fragment to memory with an additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store to memory
Index pointer_offset); ///< linear offset (in units of Element) when storing
/// Stores a fragment to memory with a logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
Fragment const &frag, ///< fragment to store to memory
TensorCoord const &tile_offset); ///< stores with logical offset in units of whole tiles
/// Stores a fragment to memory with a logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
Fragment const &frag, ///< fragment to store to memory
TensorCoord const &tile_offset, ///< stores with logical offset in units of whole tiles
Index pointer_offset);
};
```
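As an illustration, a GEMM-style mainloop might position an operand iterator at a threadblock's starting tile and then advance it one whole tile along the _K_ dimension per iteration. The following sketch is illustrative only; `Iterator` is assumed to model the concept above (only its readable, random-access portion is exercised).
```c++
/// Illustrative sketch only: load one operand tile per mainloop iteration.
template <typename Iterator>
CUTLASS_DEVICE
void load_operand_tiles(
    Iterator iter,
    typename Iterator::Fragment &frag,
    typename Iterator::TensorCoord const &threadblock_offset,  // starting tile of this threadblock
    typename Iterator::TensorCoord const &k_step,              // one whole tile along K
    int k_iterations) {

  iter.add_tile_offset(threadblock_offset);

  for (int k = 0; k < k_iterations; ++k) {
    iter.load(frag);   // consume `frag` here (e.g., feed warp-level MMA)
    iter += k_step;    // advance one whole tile along K
  }
}
```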
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
[end of media/docs/tile_iterator_concept.md]
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Ease-of-use interface for constructing, compiling, and running GEMMs.
The ``GroupedGemm`` interface is meant to allow one to easily instantiate, compile, and run
grouped GEMM operations in CUTLASS via Python, without specifying many configuration parameters.
Under the hood, the interface will select sensible default parameters for the many template
parameters for CUTLASS grouped GEMMs.
Note: optimal performance is not to be expected from this interface. To achieve optimal
performance, one should specify and tune each configuration parameter.
The simplest example of using this interface is the following:
.. highlight:: python
.. code-block:: python
# As, Bs, Cs, and Ds are torch/numpy/cupy tensor objects
plan = cutlass.op.GroupedGemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
plan.run([A0, A1], [B0, B1], [C0, C1], [D0, D1])
"""
from cutlass_library import DataTypeSize
from cuda import cuda
from cutlass.backend.gemm_operation import (
GemmGroupedArguments,
GemmOperationGrouped,
)
from cutlass.backend.library import (
SchedulerMode,
TensorDescription,
TileDescription,
)
from cutlass.op.gemm import Gemm
from cutlass.shape import GemmCoord
from cutlass.utils import check, datatypes
class GroupedGemm(Gemm):
"""
Constructs a ``GroupedGemm`` object.
The data types and layouts of operands A, B, and C, along with the data type of output D
and that used for accumulation, are bound to the ``GroupedGemm`` object throughout its lifetime --
these are not to be changed after a ``GroupedGemm`` has been constructed.
The constructor has optional parameters for flexibly setting these parameters. Please see the constructor
for ``Gemm`` for examples of these.
:param cc: compute capability of device to generate kernels for
:type cc: int
:param A: tensor representing data type and layout of operands A
:param B: tensor representing data type and layout of operands B
:param C: tensor representing data type and layout of operands C
:param D: tensor representing data type and layout of operands D
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param element_accumulator: data type to be used in accumulation of the product of operands A and B
:type element_accumulator: cutlass.DataType
:param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type
:type element: cutlass.DataType
:param layout: generic layout type to be used for operands A, B, C, and D
:type layout: cutlass.LayoutType
:param element_A: data type to be used for operand A
:type element_A: cutlass.DataType
:param element_B: data type to be used for operand B
:type element_B: cutlass.DataType
:param element_C: data type to be used for operand C
:type element_C: cutlass.DataType
:param element_D: data type to be used for operand D
:type element_D: cutlass.DataType
:param layout_A: layout of operand A
:type layout_A: cutlass.LayoutType
:param layout_B: layout of operand B
:type layout_B: cutlass.LayoutType
:param layout_C: layout of operand C
:type layout_C: cutlass.LayoutType
:param layout_D: layout of operand D
:type layout_D: cutlass.LayoutType
"""
def __init__(
self, A=None, B=None, C=None, D=None,
alpha=1.0, beta=0.0, element_accumulator=None,
element=None, layout=None,
element_A=None, element_B=None, element_C=None, element_D=None,
layout_A=None, layout_B=None, layout_C=None,
cc: int = None,
):
super().__init__(
A=A, B=B, C=C, D=D,
alpha=alpha, beta=beta,
element_accumulator=element_accumulator,
element=element, layout=layout,
element_A=element_A, element_B=element_B,
element_C=element_C, element_D=element_D,
layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
cc=cc
)
# Grouped GEMM specializations for SM90 are currently unavailable. Revert to using SM80
if self.current_cc == 90:
self._reset_options(80)
self._reset_operations(reset_epilogue=False)
self.name = "grouped_gemm"
@Gemm.swizzling_functor.setter
def swizzling_functor(self, swizzling_functor):
"""
Sets the swizzling functor to the type specified by `swizzling_functor`
"""
raise Exception('Grouped GEMM does not currently support different swizzling functors')
def construct(self, tile_description: TileDescription = None,
alignment_A: int = None,
alignment_B: int = None,
alignment_C: int = None) -> GemmOperationGrouped:
"""
Constructs a ``cutlass.backend.GemmOperationGrouped`` based on the input parameters and current
kernel specification of the ``Gemm`` object.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:return: operation that was constructed
:rtype: cutlass.backend.GemmOperationGrouped
"""
alignment_A = check.alignment_or_default(alignment_A, max(self.possible_operations.alignments("A")))
alignment_B = check.alignment_or_default(alignment_B, max(self.possible_operations.alignments("B")))
alignment_C = check.alignment_or_default(alignment_C, max(self.possible_operations.alignments("C")))
self.epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, self.epilogue_functor)
tensor_A = TensorDescription(self._element_a, self._layout_a, alignment_A)
tensor_B = TensorDescription(self._element_b, self._layout_b, alignment_B)
tensor_C = TensorDescription(self._element_c, self._layout_c, alignment_C)
if tile_description is None:
op = self.possible_operations.operations(alignment_A, alignment_B, alignment_C, self._math_operation)[0]
tile_description = datatypes.td_from_profiler_op(op)
else:
valid, err_str = self._valid_tile_description(tile_description)
if not valid:
raise Exception(f"Invalid tile description. {err_str}")
self.tile_description = tile_description
operation = GemmOperationGrouped(
arch=self.current_cc,
tile_description=tile_description,
A=tensor_A, B=tensor_B, C=tensor_C,
epilogue_functor=self.epilogue_functor,
swizzling_functor=self._swizzling_functor,
precompute_mode=SchedulerMode.Device)
return operation
def run(self, A, B, C, D,
alpha=None, beta=None, sync: bool = True,
print_module: bool = False,
stream: cuda.CUstream = cuda.CUstream(0)) -> GemmGroupedArguments:
"""
Runs the kernel currently specified.
By default, this call returns only once the kernel has completed. To launch the kernel
and immediately return, set ``sync=False``. In this case, it is the responsibility of the
caller to synchronize the results of the kernel before attempting to access outputs
by calling ``sync()`` on the arguments returned from this call.
:param A: list of tensors representing data type and layout of operand A
:type A: list
:param B: list of tensors representing data type and layout of operand B
:type B: list
:param C: list of tensors representing data type and layout of operand C
:type C: list
:param D: list of tensors representing data type and layout of operand D
:type D: list
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param sync: whether the call should wait for the kernel to complete before returning
:type sync: bool
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
:return: arguments passed in to the kernel
:rtype: cutlass.backend.GemmGroupedArguments
"""
super().run_setup()
if len(A) != len(B) or len(A) != len(C) or len(A) != len(D):
raise Exception("Lengths of A, B, C, and D lists must be equal")
problem_sizes = []
As, Bs, Cs, Ds = ([None] * len(A) for _ in range(4))
for i in range(len(A)):
As[i] = self._verify_tensor(A[i], self.A, self._element_a, self._layout_a, "A")
Bs[i] = self._verify_tensor(B[i], self.B, self._element_b, self._layout_b, "B")
Cs[i] = self._verify_tensor(C[i], self.C, self._element_c, self._layout_c, "C")
Ds[i] = self._verify_tensor(D[i], self.D, self._element_d, self._layout_d, "D")
problem_sizes.append(GemmCoord(A[i].shape[0], B[i].shape[1], A[i].shape[1]))
alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha")
beta = self._verify_scalar(beta, self.beta, self._element_c, "beta")
alignment_a = min((self.possible_operations.find_alignment(A.shape, self._layout_a, operand="A") for A in As))
alignment_b = min((self.possible_operations.find_alignment(B.shape, self._layout_b, operand="B") for B in Bs))
alignment_c = min((self.possible_operations.find_alignment(C.shape, self._layout_c, operand="C") for C in Cs))
self.compile(self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b,
alignment_C=alignment_c, print_module=print_module)
arguments = GemmGroupedArguments(
operation=self.operation,
problem_sizes=problem_sizes,
A=As, B=Bs, C=Cs, D=Ds,
output_op=self.operation.epilogue_type(alpha, beta),
stream=stream
)
self.operation.run(arguments)
if sync:
arguments.sync()
return arguments
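# Usage sketch (illustrative only; assumes A0/A1, B0/B1, C0/C1, D0/D1 are GPU tensors
# of compatible shapes and data types):
#
#   plan = GroupedGemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
#   args = plan.run([A0, A1], [B0, B1], [C0, C1], [D0, D1], sync=False)
#   # ... overlap other host-side work with the grouped GEMM here ...
#   args.sync()  # wait for completion before reading D0 and D1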
[end of python/cutlass/op/gemm_grouped.py]
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for emitting Rank2K kernels
"""
import enum
import functools
import operator
import os.path
import shutil
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
except ImportError:
from library import *
###################################################################################################
#
# Data structure modeling a Rank K update operation
#
###################################################################################################
#
class Rank2KOperation:
#
def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.Rank2K
self.arch = arch
self.tile_description = tile_description
self.rank_k_kind = rank_k_kind
# tensor A and B have same data type and layout
self.A = A
self.B = A
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_mixed_input(self):
return self.A.element != self.B.element
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'syr2k' if self.blas_mode == BlasMode.symmetric else 'her2k'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.C.fill_mode])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.A.alignment, self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitRank2KUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.rank_k_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Rank2K<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation}
>;
"""
self.rank_k_complex_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Rank2K<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation},
${transform_a},
${transform_b},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'fill_mode': FillModeTag[operation.C.fill_mode],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'blas_mode': BlasModeTag[operation.blas_mode]
}
rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template
return SubstituteTemplate(rank_k_template, values)
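# Usage sketch (illustrative only; `operation` is assumed to be a fully populated
# Rank2KOperation describing tile shapes, data types, fill mode, and epilogue):
#
#   emitter = EmitRank2KUniversalInstance()
#   instance_cpp = emitter.emit(operation)  # the instantiated C++ template as a string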
###################################################################################################
###################################################################################################
#
# Emitter functions for all targets
#
###################################################################################################
class EmitRank2KConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
RankKKind.Universal: EmitRank2KUniversalInstance,
}
self.rank_k_kind_wrappers = {
RankKKind.Universal: 'Rank2KOperation',
}
self.instance_template = {
RankKKind.Universal: """
${compile_guard_start}
manifest.append(new ${rank_k_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
}
self.header_template = """
/*
Generated by rank_2k_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "rank_2k_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.rank_k_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
[end of python/cutlass_library/rank_2k_operation.py]
// Localization support
const messages = {
'en': {
'copy': 'Copy',
'copy_to_clipboard': 'Copy to clipboard',
'copy_success': 'Copied!',
'copy_failure': 'Failed to copy',
},
'es' : {
'copy': 'Copiar',
'copy_to_clipboard': 'Copiar al portapapeles',
'copy_success': '¡Copiado!',
'copy_failure': 'Error al copiar',
},
'de' : {
'copy': 'Kopieren',
'copy_to_clipboard': 'In die Zwischenablage kopieren',
'copy_success': 'Kopiert!',
'copy_failure': 'Fehler beim Kopieren',
},
'fr' : {
'copy': 'Copier',
'copy_to_clipboard': 'Copier dans le presse-papier',
'copy_success': 'Copié !',
'copy_failure': 'Échec de la copie',
},
'ru': {
'copy': 'Скопировать',
'copy_to_clipboard': 'Скопировать в буфер',
'copy_success': 'Скопировано!',
'copy_failure': 'Не удалось скопировать',
},
'zh-CN': {
'copy': '复制',
'copy_to_clipboard': '复制到剪贴板',
'copy_success': '复制成功!',
'copy_failure': '复制失败',
},
'it' : {
'copy': 'Copiare',
'copy_to_clipboard': 'Copiato negli appunti',
'copy_success': 'Copiato!',
'copy_failure': 'Errore durante la copia',
}
}
let locale = 'en'
if( document.documentElement.lang !== undefined
&& messages[document.documentElement.lang] !== undefined ) {
locale = document.documentElement.lang
}
let doc_url_root = DOCUMENTATION_OPTIONS.URL_ROOT;
if (doc_url_root == '#') {
doc_url_root = '';
}
/**
* SVG files for our copy buttons
*/
let iconCheck = `<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-check" width="44" height="44" viewBox="0 0 24 24" stroke-width="2" stroke="#22863a" fill="none" stroke-linecap="round" stroke-linejoin="round">
<title>${messages[locale]['copy_success']}</title>
<path stroke="none" d="M0 0h24v24H0z" fill="none"/>
<path d="M5 12l5 5l10 -10" />
</svg>`
// If the user specified their own SVG use that, otherwise use the default
let iconCopy = ``;
if (!iconCopy) {
iconCopy = `<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-copy" width="44" height="44" viewBox="0 0 24 24" stroke-width="1.5" stroke="#000000" fill="none" stroke-linecap="round" stroke-linejoin="round">
<title>${messages[locale]['copy_to_clipboard']}</title>
<path stroke="none" d="M0 0h24v24H0z" fill="none"/>
<rect x="8" y="8" width="12" height="12" rx="2" />
<path d="M16 8v-2a2 2 0 0 0 -2 -2h-8a2 2 0 0 0 -2 2v8a2 2 0 0 0 2 2h2" />
</svg>`
}
/**
* Set up copy/paste for code blocks
*/
const runWhenDOMLoaded = cb => {
if (document.readyState != 'loading') {
cb()
} else if (document.addEventListener) {
document.addEventListener('DOMContentLoaded', cb)
} else {
document.attachEvent('onreadystatechange', function() {
if (document.readyState == 'complete') cb()
})
}
}
const codeCellId = index => `codecell${index}`
// Clears selected text since ClipboardJS will select the text when copying
const clearSelection = () => {
if (window.getSelection) {
window.getSelection().removeAllRanges()
} else if (document.selection) {
document.selection.empty()
}
}
// Changes tooltip text for a moment, then changes it back
// We want the timeout of our `success` class to be a bit shorter than the
// tooltip and icon change, so that we can hide the icon before changing back.
var timeoutIcon = 2000;
var timeoutSuccessClass = 1500;
const temporarilyChangeTooltip = (el, oldText, newText) => {
el.setAttribute('data-tooltip', newText)
el.classList.add('success')
// Remove success a little bit sooner than we change the tooltip
// So that we can use CSS to hide the copybutton first
setTimeout(() => el.classList.remove('success'), timeoutSuccessClass)
setTimeout(() => el.setAttribute('data-tooltip', oldText), timeoutIcon)
}
// Changes the copy button icon for two seconds, then changes it back
const temporarilyChangeIcon = (el) => {
el.innerHTML = iconCheck;
setTimeout(() => {el.innerHTML = iconCopy}, timeoutIcon)
}
const addCopyButtonToCodeCells = () => {
// If ClipboardJS hasn't loaded, wait a bit and try again. This
// happens because we load ClipboardJS asynchronously.
if (window.ClipboardJS === undefined) {
setTimeout(addCopyButtonToCodeCells, 250)
return
}
// Add copybuttons to all of our code cells
const COPYBUTTON_SELECTOR = 'div.highlight pre';
const codeCells = document.querySelectorAll(COPYBUTTON_SELECTOR)
codeCells.forEach((codeCell, index) => {
const id = codeCellId(index)
codeCell.setAttribute('id', id)
const clipboardButton = id =>
`<button class="copybtn o-tooltip--left" data-tooltip="${messages[locale]['copy']}" data-clipboard-target="#${id}">
${iconCopy}
</button>`
codeCell.insertAdjacentHTML('afterend', clipboardButton(id))
})
function escapeRegExp(string) {
return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
}
/**
* Removes excluded text from a Node.
*
* @param {Node} target Node to filter.
* @param {string} exclude CSS selector of nodes to exclude.
* @returns {DOMString} Text from `target` with text removed.
*/
function filterText(target, exclude) {
const clone = target.cloneNode(true); // clone as to not modify the live DOM
if (exclude) {
// remove excluded nodes
clone.querySelectorAll(exclude).forEach(node => node.remove());
}
return clone.innerText;
}
// Callback invoked when a copy button is clicked. It is passed the node that was clicked,
// grabs its text, and strips pieces of text that shouldn't appear in the copied output.
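// For example (illustrative): with copybuttonPromptText = ">>> ", removePrompts = true,
// and onlyCopyPromptLines = true, the text ">>> print(1 + 1)\n2" is reduced to "print(1 + 1)".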
function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") {
var regexp;
var match;
// Do we check for line continuation characters and "HERE-documents"?
var useLineCont = !!lineContinuationChar
var useHereDoc = !!hereDocDelim
// create regexp to capture prompt and remaining line
if (isRegexp) {
regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)')
} else {
regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)')
}
const outputLines = [];
var promptFound = false;
var gotLineCont = false;
var gotHereDoc = false;
const lineGotPrompt = [];
for (const line of textContent.split('\n')) {
match = line.match(regexp)
if (match || gotLineCont || gotHereDoc) {
promptFound = regexp.test(line)
lineGotPrompt.push(promptFound)
if (removePrompts && promptFound) {
outputLines.push(match[2])
} else {
outputLines.push(line)
}
gotLineCont = line.endsWith(lineContinuationChar) & useLineCont
if (line.includes(hereDocDelim) & useHereDoc)
gotHereDoc = !gotHereDoc
} else if (!onlyCopyPromptLines) {
outputLines.push(line)
} else if (copyEmptyLines && line.trim() === '') {
outputLines.push(line)
}
}
// If no lines with the prompt were found then just use original lines
if (lineGotPrompt.some(v => v === true)) {
textContent = outputLines.join('\n');
}
// Remove a trailing newline to avoid auto-running when pasting
if (textContent.endsWith("\n")) {
textContent = textContent.slice(0, -1)
}
return textContent
}
var copyTargetText = (trigger) => {
var target = document.querySelector(trigger.attributes['data-clipboard-target'].value);
// get filtered text
let exclude = '.linenos';
let text = filterText(target, exclude);
return formatCopyText(text, '', false, true, true, true, '', '')
}
// Initialize with a callback so we can modify the text before copy
const clipboard = new ClipboardJS('.copybtn', {text: copyTargetText})
// Update UI with error/success messages
clipboard.on('success', event => {
clearSelection()
temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_success'])
temporarilyChangeIcon(event.trigger)
})
clipboard.on('error', event => {
temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_failure'])
})
}
runWhenDOMLoaded(addCopyButtonToCodeCells)
[end of python/docs/_static/copybutton.js]
.highlight pre { line-height: 125%; }
.highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
.highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
.highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
.highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
.highlight .hll { background-color: #ffffcc }
.highlight { background: #eeffcc; }
.highlight .c { color: #408090; font-style: italic } /* Comment */
.highlight .err { border: 1px solid #FF0000 } /* Error */
.highlight .k { color: #007020; font-weight: bold } /* Keyword */
.highlight .o { color: #666666 } /* Operator */
.highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */
.highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */
.highlight .cp { color: #007020 } /* Comment.Preproc */
.highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */
.highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */
.highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */
.highlight .gd { color: #A00000 } /* Generic.Deleted */
.highlight .ge { font-style: italic } /* Generic.Emph */
.highlight .gr { color: #FF0000 } /* Generic.Error */
.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
.highlight .gi { color: #00A000 } /* Generic.Inserted */
.highlight .go { color: #333333 } /* Generic.Output */
.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */
.highlight .gs { font-weight: bold } /* Generic.Strong */
.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
.highlight .gt { color: #0044DD } /* Generic.Traceback */
.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */
.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */
.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */
.highlight .kp { color: #007020 } /* Keyword.Pseudo */
.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */
.highlight .kt { color: #902000 } /* Keyword.Type */
.highlight .m { color: #208050 } /* Literal.Number */
.highlight .s { color: #4070a0 } /* Literal.String */
.highlight .na { color: #4070a0 } /* Name.Attribute */
.highlight .nb { color: #007020 } /* Name.Builtin */
.highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */
.highlight .no { color: #60add5 } /* Name.Constant */
.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */
.highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */
.highlight .ne { color: #007020 } /* Name.Exception */
.highlight .nf { color: #06287e } /* Name.Function */
.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */
.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */
.highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */
.highlight .nv { color: #bb60d5 } /* Name.Variable */
.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */
.highlight .w { color: #bbbbbb } /* Text.Whitespace */
.highlight .mb { color: #208050 } /* Literal.Number.Bin */
.highlight .mf { color: #208050 } /* Literal.Number.Float */
.highlight .mh { color: #208050 } /* Literal.Number.Hex */
.highlight .mi { color: #208050 } /* Literal.Number.Integer */
.highlight .mo { color: #208050 } /* Literal.Number.Oct */
.highlight .sa { color: #4070a0 } /* Literal.String.Affix */
.highlight .sb { color: #4070a0 } /* Literal.String.Backtick */
.highlight .sc { color: #4070a0 } /* Literal.String.Char */
.highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */
.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */
.highlight .s2 { color: #4070a0 } /* Literal.String.Double */
.highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */
.highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */
.highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */
.highlight .sx { color: #c65d09 } /* Literal.String.Other */
.highlight .sr { color: #235388 } /* Literal.String.Regex */
.highlight .s1 { color: #4070a0 } /* Literal.String.Single */
.highlight .ss { color: #517918 } /* Literal.String.Symbol */
.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */
.highlight .fm { color: #06287e } /* Name.Function.Magic */
.highlight .vc { color: #bb60d5 } /* Name.Variable.Class */
.highlight .vg { color: #bb60d5 } /* Name.Variable.Global */
.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */
.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */
.highlight .il { color: #208050 } /* Literal.Number.Integer.Long */
@media not print {
body[data-theme="dark"] .highlight pre { line-height: 125%; }
body[data-theme="dark"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
body[data-theme="dark"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
body[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
body[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
body[data-theme="dark"] .highlight .hll { background-color: #49483e }
body[data-theme="dark"] .highlight { background: #272822; color: #f8f8f2 }
body[data-theme="dark"] .highlight .c { color: #75715e } /* Comment */
body[data-theme="dark"] .highlight .err { color: #960050; background-color: #1e0010 } /* Error */
body[data-theme="dark"] .highlight .esc { color: #f8f8f2 } /* Escape */
body[data-theme="dark"] .highlight .g { color: #f8f8f2 } /* Generic */
body[data-theme="dark"] .highlight .k { color: #66d9ef } /* Keyword */
body[data-theme="dark"] .highlight .l { color: #ae81ff } /* Literal */
body[data-theme="dark"] .highlight .n { color: #f8f8f2 } /* Name */
body[data-theme="dark"] .highlight .o { color: #f92672 } /* Operator */
body[data-theme="dark"] .highlight .x { color: #f8f8f2 } /* Other */
body[data-theme="dark"] .highlight .p { color: #f8f8f2 } /* Punctuation */
body[data-theme="dark"] .highlight .ch { color: #75715e } /* Comment.Hashbang */
body[data-theme="dark"] .highlight .cm { color: #75715e } /* Comment.Multiline */
body[data-theme="dark"] .highlight .cp { color: #75715e } /* Comment.Preproc */
body[data-theme="dark"] .highlight .cpf { color: #75715e } /* Comment.PreprocFile */
body[data-theme="dark"] .highlight .c1 { color: #75715e } /* Comment.Single */
body[data-theme="dark"] .highlight .cs { color: #75715e } /* Comment.Special */
body[data-theme="dark"] .highlight .gd { color: #f92672 } /* Generic.Deleted */
body[data-theme="dark"] .highlight .ge { color: #f8f8f2; font-style: italic } /* Generic.Emph */
body[data-theme="dark"] .highlight .gr { color: #f8f8f2 } /* Generic.Error */
body[data-theme="dark"] .highlight .gh { color: #f8f8f2 } /* Generic.Heading */
body[data-theme="dark"] .highlight .gi { color: #a6e22e } /* Generic.Inserted */
body[data-theme="dark"] .highlight .go { color: #66d9ef } /* Generic.Output */
body[data-theme="dark"] .highlight .gp { color: #f92672; font-weight: bold } /* Generic.Prompt */
body[data-theme="dark"] .highlight .gs { color: #f8f8f2; font-weight: bold } /* Generic.Strong */
body[data-theme="dark"] .highlight .gu { color: #75715e } /* Generic.Subheading */
body[data-theme="dark"] .highlight .gt { color: #f8f8f2 } /* Generic.Traceback */
body[data-theme="dark"] .highlight .kc { color: #66d9ef } /* Keyword.Constant */
body[data-theme="dark"] .highlight .kd { color: #66d9ef } /* Keyword.Declaration */
body[data-theme="dark"] .highlight .kn { color: #f92672 } /* Keyword.Namespace */
body[data-theme="dark"] .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
body[data-theme="dark"] .highlight .kr { color: #66d9ef } /* Keyword.Reserved */
body[data-theme="dark"] .highlight .kt { color: #66d9ef } /* Keyword.Type */
body[data-theme="dark"] .highlight .ld { color: #e6db74 } /* Literal.Date */
body[data-theme="dark"] .highlight .m { color: #ae81ff } /* Literal.Number */
body[data-theme="dark"] .highlight .s { color: #e6db74 } /* Literal.String */
body[data-theme="dark"] .highlight .na { color: #a6e22e } /* Name.Attribute */
body[data-theme="dark"] .highlight .nb { color: #f8f8f2 } /* Name.Builtin */
body[data-theme="dark"] .highlight .nc { color: #a6e22e } /* Name.Class */
body[data-theme="dark"] .highlight .no { color: #66d9ef } /* Name.Constant */
body[data-theme="dark"] .highlight .nd { color: #a6e22e } /* Name.Decorator */
body[data-theme="dark"] .highlight .ni { color: #f8f8f2 } /* Name.Entity */
body[data-theme="dark"] .highlight .ne { color: #a6e22e } /* Name.Exception */
body[data-theme="dark"] .highlight .nf { color: #a6e22e } /* Name.Function */
body[data-theme="dark"] .highlight .nl { color: #f8f8f2 } /* Name.Label */
body[data-theme="dark"] .highlight .nn { color: #f8f8f2 } /* Name.Namespace */
body[data-theme="dark"] .highlight .nx { color: #a6e22e } /* Name.Other */
body[data-theme="dark"] .highlight .py { color: #f8f8f2 } /* Name.Property */
body[data-theme="dark"] .highlight .nt { color: #f92672 } /* Name.Tag */
body[data-theme="dark"] .highlight .nv { color: #f8f8f2 } /* Name.Variable */
body[data-theme="dark"] .highlight .ow { color: #f92672 } /* Operator.Word */
body[data-theme="dark"] .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */
body[data-theme="dark"] .highlight .w { color: #f8f8f2 } /* Text.Whitespace */
body[data-theme="dark"] .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
body[data-theme="dark"] .highlight .mf { color: #ae81ff } /* Literal.Number.Float */
body[data-theme="dark"] .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
body[data-theme="dark"] .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
body[data-theme="dark"] .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
body[data-theme="dark"] .highlight .sa { color: #e6db74 } /* Literal.String.Affix */
body[data-theme="dark"] .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
body[data-theme="dark"] .highlight .sc { color: #e6db74 } /* Literal.String.Char */
body[data-theme="dark"] .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
body[data-theme="dark"] .highlight .sd { color: #e6db74 } /* Literal.String.Doc */
body[data-theme="dark"] .highlight .s2 { color: #e6db74 } /* Literal.String.Double */
body[data-theme="dark"] .highlight .se { color: #ae81ff } /* Literal.String.Escape */
body[data-theme="dark"] .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
body[data-theme="dark"] .highlight .si { color: #e6db74 } /* Literal.String.Interpol */
body[data-theme="dark"] .highlight .sx { color: #e6db74 } /* Literal.String.Other */
body[data-theme="dark"] .highlight .sr { color: #e6db74 } /* Literal.String.Regex */
body[data-theme="dark"] .highlight .s1 { color: #e6db74 } /* Literal.String.Single */
body[data-theme="dark"] .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
body[data-theme="dark"] .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
body[data-theme="dark"] .highlight .fm { color: #a6e22e } /* Name.Function.Magic */
body[data-theme="dark"] .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
body[data-theme="dark"] .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
body[data-theme="dark"] .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
body[data-theme="dark"] .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
body[data-theme="dark"] .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
@media (prefers-color-scheme: dark) {
body:not([data-theme="light"]) .highlight pre { line-height: 125%; }
body:not([data-theme="light"]) .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
body:not([data-theme="light"]) .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
body:not([data-theme="light"]) .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
body:not([data-theme="light"]) .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
body:not([data-theme="light"]) .highlight .hll { background-color: #49483e }
body:not([data-theme="light"]) .highlight { background: #272822; color: #f8f8f2 }
body:not([data-theme="light"]) .highlight .c { color: #75715e } /* Comment */
body:not([data-theme="light"]) .highlight .err { color: #960050; background-color: #1e0010 } /* Error */
body:not([data-theme="light"]) .highlight .esc { color: #f8f8f2 } /* Escape */
body:not([data-theme="light"]) .highlight .g { color: #f8f8f2 } /* Generic */
body:not([data-theme="light"]) .highlight .k { color: #66d9ef } /* Keyword */
body:not([data-theme="light"]) .highlight .l { color: #ae81ff } /* Literal */
body:not([data-theme="light"]) .highlight .n { color: #f8f8f2 } /* Name */
body:not([data-theme="light"]) .highlight .o { color: #f92672 } /* Operator */
body:not([data-theme="light"]) .highlight .x { color: #f8f8f2 } /* Other */
body:not([data-theme="light"]) .highlight .p { color: #f8f8f2 } /* Punctuation */
body:not([data-theme="light"]) .highlight .ch { color: #75715e } /* Comment.Hashbang */
body:not([data-theme="light"]) .highlight .cm { color: #75715e } /* Comment.Multiline */
body:not([data-theme="light"]) .highlight .cp { color: #75715e } /* Comment.Preproc */
body:not([data-theme="light"]) .highlight .cpf { color: #75715e } /* Comment.PreprocFile */
body:not([data-theme="light"]) .highlight .c1 { color: #75715e } /* Comment.Single */
body:not([data-theme="light"]) .highlight .cs { color: #75715e } /* Comment.Special */
body:not([data-theme="light"]) .highlight .gd { color: #f92672 } /* Generic.Deleted */
body:not([data-theme="light"]) .highlight .ge { color: #f8f8f2; font-style: italic } /* Generic.Emph */
body:not([data-theme="light"]) .highlight .gr { color: #f8f8f2 } /* Generic.Error */
body:not([data-theme="light"]) .highlight .gh { color: #f8f8f2 } /* Generic.Heading */
body:not([data-theme="light"]) .highlight .gi { color: #a6e22e } /* Generic.Inserted */
body:not([data-theme="light"]) .highlight .go { color: #66d9ef } /* Generic.Output */
body:not([data-theme="light"]) .highlight .gp { color: #f92672; font-weight: bold } /* Generic.Prompt */
body:not([data-theme="light"]) .highlight .gs { color: #f8f8f2; font-weight: bold } /* Generic.Strong */
body:not([data-theme="light"]) .highlight .gu { color: #75715e } /* Generic.Subheading */
body:not([data-theme="light"]) .highlight .gt { color: #f8f8f2 } /* Generic.Traceback */
body:not([data-theme="light"]) .highlight .kc { color: #66d9ef } /* Keyword.Constant */
body:not([data-theme="light"]) .highlight .kd { color: #66d9ef } /* Keyword.Declaration */
body:not([data-theme="light"]) .highlight .kn { color: #f92672 } /* Keyword.Namespace */
body:not([data-theme="light"]) .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
body:not([data-theme="light"]) .highlight .kr { color: #66d9ef } /* Keyword.Reserved */
body:not([data-theme="light"]) .highlight .kt { color: #66d9ef } /* Keyword.Type */
body:not([data-theme="light"]) .highlight .ld { color: #e6db74 } /* Literal.Date */
body:not([data-theme="light"]) .highlight .m { color: #ae81ff } /* Literal.Number */
body:not([data-theme="light"]) .highlight .s { color: #e6db74 } /* Literal.String */
body:not([data-theme="light"]) .highlight .na { color: #a6e22e } /* Name.Attribute */
body:not([data-theme="light"]) .highlight .nb { color: #f8f8f2 } /* Name.Builtin */
body:not([data-theme="light"]) .highlight .nc { color: #a6e22e } /* Name.Class */
body:not([data-theme="light"]) .highlight .no { color: #66d9ef } /* Name.Constant */
body:not([data-theme="light"]) .highlight .nd { color: #a6e22e } /* Name.Decorator */
body:not([data-theme="light"]) .highlight .ni { color: #f8f8f2 } /* Name.Entity */
body:not([data-theme="light"]) .highlight .ne { color: #a6e22e } /* Name.Exception */
body:not([data-theme="light"]) .highlight .nf { color: #a6e22e } /* Name.Function */
body:not([data-theme="light"]) .highlight .nl { color: #f8f8f2 } /* Name.Label */
body:not([data-theme="light"]) .highlight .nn { color: #f8f8f2 } /* Name.Namespace */
body:not([data-theme="light"]) .highlight .nx { color: #a6e22e } /* Name.Other */
body:not([data-theme="light"]) .highlight .py { color: #f8f8f2 } /* Name.Property */
body:not([data-theme="light"]) .highlight .nt { color: #f92672 } /* Name.Tag */
body:not([data-theme="light"]) .highlight .nv { color: #f8f8f2 } /* Name.Variable */
body:not([data-theme="light"]) .highlight .ow { color: #f92672 } /* Operator.Word */
body:not([data-theme="light"]) .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */
body:not([data-theme="light"]) .highlight .w { color: #f8f8f2 } /* Text.Whitespace */
body:not([data-theme="light"]) .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
body:not([data-theme="light"]) .highlight .mf { color: #ae81ff } /* Literal.Number.Float */
body:not([data-theme="light"]) .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
body:not([data-theme="light"]) .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
body:not([data-theme="light"]) .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
body:not([data-theme="light"]) .highlight .sa { color: #e6db74 } /* Literal.String.Affix */
body:not([data-theme="light"]) .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
body:not([data-theme="light"]) .highlight .sc { color: #e6db74 } /* Literal.String.Char */
body:not([data-theme="light"]) .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
body:not([data-theme="light"]) .highlight .sd { color: #e6db74 } /* Literal.String.Doc */
body:not([data-theme="light"]) .highlight .s2 { color: #e6db74 } /* Literal.String.Double */
body:not([data-theme="light"]) .highlight .se { color: #ae81ff } /* Literal.String.Escape */
body:not([data-theme="light"]) .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
body:not([data-theme="light"]) .highlight .si { color: #e6db74 } /* Literal.String.Interpol */
body:not([data-theme="light"]) .highlight .sx { color: #e6db74 } /* Literal.String.Other */
body:not([data-theme="light"]) .highlight .sr { color: #e6db74 } /* Literal.String.Regex */
body:not([data-theme="light"]) .highlight .s1 { color: #e6db74 } /* Literal.String.Single */
body:not([data-theme="light"]) .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
body:not([data-theme="light"]) .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
body:not([data-theme="light"]) .highlight .fm { color: #a6e22e } /* Name.Function.Magic */
body:not([data-theme="light"]) .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
body:not([data-theme="light"]) .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
body:not([data-theme="light"]) .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
body:not([data-theme="light"]) .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
body:not([data-theme="light"]) .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
}
} | python/docs/_static/pygments.css/0 | {
"file_path": "python/docs/_static/pygments.css",
"repo_id": "python",
"token_count": 7238
} | 47 |
{
"path": "./../../../../examples/python/01_epilogue.ipynb"
}
| python/docs_src/source/externals/01_epilogue.nblink/0 | {
"file_path": "python/docs_src/source/externals/01_epilogue.nblink",
"repo_id": "python",
"token_count": 30
} | 48 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for Conv2d operations on SM80
"""
import logging
import unittest
import cutlass
from cutlass.backend.utils.device import device_cc
from conv2d_test_utils import *
cutlass.set_log_level(logging.WARNING)
cc = 80
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class Conv2dSm80(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
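# Illustrative sketch (an assumption, not the actual add_test implementation from
# conv2d_test_utils): tests are attached to Conv2dSm80 dynamically, roughly
# following the standard setattr pattern below. The helper name and placeholder
# body are hypothetical.
def _example_attach_dynamic_test(test_cls, name):
    def _case(self):
        # Placeholder body; the real generated tests compile and run a kernel.
        self.assertTrue(True)
    setattr(test_cls, f"test_{name}", _case)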
conv_problems = get_conv_problems()
# Tests for optimized & analytic
for conv_kind in ["fprop", "wgrad", "dgrad"]:
# F16, simt
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="simt", threadblock_shape=[128, 128, 8],
warp_count=[4, 2, 1], stages=2, instruction_shape=[1, 1, 1])
# F16, tensor op
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16])
# F16, tensor op, analytic iterator
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], iterator_algorithm="analytic")
# F16, tensor op, f32 output
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f32,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16])
# F16, tensor op, different tile description
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 64, 32],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 8])
# F32, simt
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32,
opclass="simt", threadblock_shape=[128, 128, 8],
warp_count=[4, 2, 1], stages=4, instruction_shape=[1, 1, 1])
# Tf32, tensorop
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32,
opclass="tensor_op", threadblock_shape=[128, 128, 16],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 8]
)
# Split-K
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], split_k_mode="serial",
split_k_slices=2)
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], split_k_mode="parallel",
split_k_slices=5)
# Swizzling functor
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 64, 32],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 8], swizzle=4)
# Tests for few channels and fixed channels
# F16, tensor op, few channels
for c, tb, stage, inst in zip([2, 1],
[[128, 128, 64], [128, 128, 32]],
[3, 2],
[[16, 8, 16], [16, 8, 8]]):
add_test(
Conv2dSm80, cc, "fprop", conv2d_few_channel_problemsizes(c), cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=tb,
warp_count=[2, 2, 1], stages=stage, instruction_shape=inst, iterator_algorithm="few_channels"
)
# F16, tensor op, fixed channels
for c in [8, 4, 2]:
add_test(
Conv2dSm80, cc, "fprop", conv2d_few_channel_problemsizes(c), cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], iterator_algorithm="fixed_channels"
)
# Test activations
for activation in ["relu", "leaky_relu"]:
for split_k_mode, split_k_slices in zip(["parallel", "serial", "parallel"], [1, 7, 5]):
add_test(
Conv2dSm80, cc, "fprop", conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], split_k_mode=split_k_mode,
split_k_slices=split_k_slices, activation=activation)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/conv2d/conv2d_sm80.py/0 | {
"file_path": "test/python/cutlass/conv2d/conv2d_sm80.py",
"repo_id": "test",
"token_count": 2751
} | 49 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM testbed sizes for Conv2d problem
*/
#pragma once
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
namespace test {
namespace conv {
namespace device {
using Conv2dProblemVector = std::vector<cutlass::conv::Conv2dProblemSize>;
//
// Structures to prune items from Conv2dProblemVector
//
// Specification template for pruning items from convolution problem lists
template <typename T> struct Specification
{
virtual ~Specification() = default;
virtual bool is_satisfied(T item) const = 0;
};
// input size (NHWC) specification
struct InputSizeSpecification : Specification<cutlass::conv::Conv2dProblemSize>
{
cutlass::Tensor4DCoord input_size;
InputSizeSpecification(cutlass::Tensor4DCoord input_size_) : input_size(input_size_) {}
bool is_satisfied(cutlass::conv::Conv2dProblemSize item) const override {
return ((input_size.n() == item.N) && (input_size.h() == item.H) && (input_size.w() == item.W) && (input_size.c() == item.C));
}
};
// stride (stride_h, stride_w) specification
struct StrideSpecification : Specification<cutlass::conv::Conv2dProblemSize>
{
cutlass::MatrixCoord stride;
StrideSpecification(cutlass::MatrixCoord stride_) : stride(stride_) {}
bool is_satisfied(cutlass::conv::Conv2dProblemSize item) const override {
    return ((stride.row() == item.stride_h) && (stride.column() == item.stride_w));
}
};
// channel (C, K) specification: C and K must be multiples of channel_multiple
struct ChannelDivisibilitySpecification : Specification<cutlass::conv::Conv2dProblemSize>
{
int channel_multiple;
ChannelDivisibilitySpecification(int channel_multiple_) : channel_multiple(channel_multiple_) {}
bool is_satisfied(cutlass::conv::Conv2dProblemSize item) const override {
return ((item.K % channel_multiple == 0) && (item.C % channel_multiple == 0));
}
};
//
// Pruning function for items from Conv2dProblemVector based on a Specification
//
inline Conv2dProblemVector prune(Conv2dProblemVector const &items,
Specification<cutlass::conv::Conv2dProblemSize> const &spec)
{
Conv2dProblemVector pruned_list;
for (auto& p : items)
if (spec.is_satisfied(p))
pruned_list.push_back(p);
return pruned_list;
}
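// Illustrative usage sketch (not referenced by the tests below; the threshold of
// 32 is an arbitrary example value): keep only problems whose C and K are both
// divisible by 32.
inline Conv2dProblemVector prune_to_channel_multiple(Conv2dProblemVector const &problems) {
  ChannelDivisibilitySpecification spec(32);   // requires C % 32 == 0 and K % 32 == 0
  return prune(problems, spec);
}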
////////////////////////////////////////////////////////////////////////////
/// Structure TestbedConv2dProblemSizes initializes and holds default convolution
/// problem sizes along with sizes drawn from important networks
////////////////////////////////////////////////////////////////////////////
struct TestbedConv2dProblemSizes {
//
// Data members
//
int minimum_channel_size;
Conv2dProblemVector conv2d_default_sizes;
Conv2dProblemVector conv2d_rigorous_sizes;
Conv2dProblemVector conv2d_resnet50_sizes;
Conv2dProblemVector conv2d_resnet50_sizes_perf;
//
// Methods
//
/// Default ctor
TestbedConv2dProblemSizes(int minimum_channel_size_ = 64): minimum_channel_size (minimum_channel_size_) {
initialize_conv2d_default_sizes();
initialize_conv2d_rigorous_sizes();
initialize_conv2d_resnet50_sizes(conv2d_resnet50_sizes, 1 /*batch-size*/);
initialize_conv2d_resnet50_sizes(conv2d_resnet50_sizes_perf, 34 /*batch-size*/);
filter_all();
}
/// Eliminates some illegal cases
void filter_all() {
Conv2dProblemVector *problems_vectors[] = {
&conv2d_default_sizes,
&conv2d_rigorous_sizes,
&conv2d_resnet50_sizes,
&conv2d_resnet50_sizes_perf
};
for (Conv2dProblemVector *problems : problems_vectors) {
Conv2dProblemVector filtered;
for (cutlass::conv::Conv2dProblemSize const & problem : *problems) {
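        // Keep only problems whose input channel count C is a multiple of
        // minimum_channel_size; everything else is dropped as unsupported.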
if (!(problem.C % minimum_channel_size)) {
filtered.push_back(problem);
}
}
*problems = filtered;
}
}
// Add a few standard convolution problem sizes
void initialize_conv2d_default_sizes() {
////////////////////////////////////////////////////////////////////////////////////////////
// Small input size x stride (1,1)
// C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
////////////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 1, 1, minimum_channel_size}, // input size (NHWC)
{8, 1, 1, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 1, 8, minimum_channel_size}, // input size (NHWC)
{8, 1, 3, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 7, 8, minimum_channel_size}, // input size (NHWC)
{8, 3, 3, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 4, 4, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{2, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 5, 5, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 6, 5, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 6, 6, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 7, 7, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////////////
// Small input size x stride (1,1) asymmetric paddings (1, 0, 1, 0)
// C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
////////////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 1, 1, minimum_channel_size}, // input size (NHWC)
{8, 1, 1, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 1, 8, minimum_channel_size}, // input size (NHWC)
{8, 1, 3, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 7, 8, minimum_channel_size}, // input size (NHWC)
{8, 3, 3, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 4, 4, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{2, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 5, 5, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 6, 5, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 6, 6, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 7, 7, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////////////
// Small input size x stride (2,2)
// C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
////////////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 11, 7, minimum_channel_size}, // input size (NHWC)
{8, 1, 1, minimum_channel_size}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 11, 7, minimum_channel_size}, // input size (NHWC)
{8, 3, 3, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 13, 11, minimum_channel_size}, // input size (NHWC)
{8, 1, 1, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 17, 19, minimum_channel_size}, // input size (NHWC)
{16, 2, 2, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 23, 5, minimum_channel_size}, // input size (NHWC)
{16, 3, 3, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 13, 17, 8}, // input size (NHWC)
{24, 3, 3, 8}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 23, 21, 8}, // input size (NHWC)
{24, 3, 3, 8}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{3, 3}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 20, 24, 8}, // input size (NHWC)
{40, 3, 3, 8}, // filter size (KRSC)
{3, 3, 3, 3}, // padding (pad_h, _, pad_w, _)
{3, 3}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// Medium input size (1x16x16x128), filter size (1x1, 2x2, 3x3, 5x5), stride (1, 1)
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 15, 19, 160}, // input size (NHWC)
{224, 1, 1, 160}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 19, 37, 160}, // input size (NHWC)
{224, 3, 3, 160}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 16, 16, 160}, // input size (NHWC)
{224, 2, 3, 160}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 23, 21, 128}, // input size (NHWC)
{224, 3, 3, 128}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 29, 37, 160}, // input size (NHWC)
{224, 5, 5, 160}, // filter size (KRSC)
{2, 2, 2, 2}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// C > CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 15, 19, 32 + minimum_channel_size}, // input size (NHWC)
{96, 3, 3, 32 + minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 16, 24, 64 + minimum_channel_size}, // input size (NHWC)
{96, 3, 3, 64 + minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
    // Medium input size, filter size (1x1, 3x3, 5x5, 7x7), stride (2, 2)
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 13, 16, 288}, // input size (NHWC)
{160, 5, 5, 288}, // filter size (KRSC)
{2, 2, 2, 2}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 55, 51, 256}, // input size (NHWC)
{512, 1, 1, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 71, 80, 32}, // input size (NHWC)
{64, 5, 5, 32}, // filter size (KRSC)
{2, 2, 2, 2}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 224, 224, 8}, // input size (NHWC)
{64, 7, 7, 8}, // filter size (KRSC)
{3, 3, 3, 3}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// Medium input size stride (3, 3), filter (3, 3), non-default padding
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 27, 23, 256}, // input size (NHWC)
{512, 3, 3, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{3, 3}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// Medium input size padding > stride, asymmetric filter, padding and striding
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 27, 31, 256}, // input size (NHWC)
{512, 3, 3, 256}, // filter size (KRSC)
{5, 5, 7, 7}, // padding (pad_h, _, pad_w, _)
{3, 4}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 27, 35, 256}, // input size (NHWC)
{512, 7, 5, 256}, // filter size (KRSC)
{11, 11, 7, 7}, // padding (pad_h, _, pad_w, _)
{3, 5}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// Medium input size *mixed* stride (1, 2) and (2, 1),
// filter (3, 3), default padding
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 27, 27, 256}, // input size (NHWC)
{512, 3, 3, 256}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 27, 27, 256}, // input size (NHWC)
{512, 3, 3, 256}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
/////////////////////////////////////////////////////////////////////////////
// Additional input size
/////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 28, 28, 256}, // input size (NHWC)
{256, 2, 2, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 32, 32, 16}, // input size (NHWC)
{32, 3, 3, 16}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{6, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{32, 24, 32, 32}, // input size (NHWC)
{32, 1, 2, 32}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{4, 4, 5, 128}, // input size (NHWC)
{256, 3, 6, 128}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
{4, 3, 3, 256} // output size (NPQK)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{4, 2, 3, 256}, // input size (NHWC)
{328, 3, 5, 256}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
{4, 1, 1, 328} // output size (NPQK)
));
}
// Add a few large and rigorous convolution problem sizes
void initialize_conv2d_rigorous_sizes() {
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
conv2d_rigorous_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 124, 224, 96}, // input size (NHWC)
{24, 7, 7, 96}, // filter size (KRSC)
{1, 229, 129, 32} // output size (NPQK)
));
conv2d_rigorous_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 233, 35, 48}, // input size (NHWC)
{24, 7, 5, 48}, // filter size (KRSC)
{1, 233, 35, 24} // output size (NPQK)
));
#endif
}
  // Add resnet50 layers to unit testing sizes
void initialize_conv2d_resnet50_sizes(Conv2dProblemVector &conv2d_problem_vector, int batch_size = 1){
#if 0 // Resnet50 first layer (layer_id = 0) with channel = 3 is not supported in cutlass
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
      {1, 224, 224, 3},    // input size (NHWC)
      {64, 7, 7, 3},       // filter size (KRSC)
      {3, 3, 3, 3},        // padding (pad_h, _, pad_w, _)
      {2, 2},              // stride (stride_h, stride_w)
      {1, 1}               // dilation (dilation_h, dilation_w)
    ));
#endif
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 64}, // input size (NHWC)
{256, 1, 1, 64}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 64}, // input size (NHWC)
{64, 1, 1, 64}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 64}, // input size (NHWC)
{64, 3, 3, 64}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 256}, // input size (NHWC)
{64, 1, 1, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 256}, // input size (NHWC)
{512, 1, 1, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 256}, // input size (NHWC)
{128, 1, 1, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 28, 28, 128}, // input size (NHWC)
{128, 3, 3, 128}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 28, 28, 128}, // input size (NHWC)
{512, 1, 1, 128}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 28, 28, 512}, // input size (NHWC)
{128, 1, 1, 512}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 28, 28, 512}, // input size (NHWC)
{1024, 1, 1, 512}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 28, 28, 512}, // input size (NHWC)
{256, 1, 1, 512}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 14, 14, 256}, // input size (NHWC)
{256, 3, 3, 256}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 14, 14, 256}, // input size (NHWC)
{1024, 1, 1, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 14, 14, 1024}, // input size (NHWC)
{256, 1, 1, 1024}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 14, 14, 1024}, // input size (NHWC)
{2048, 1, 1, 1024}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 14, 14, 1024}, // input size (NHWC)
{512, 1, 1, 1024}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 7, 7, 512}, // input size (NHWC)
{512, 3, 3, 512}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 7, 7, 512}, // input size (NHWC)
{2048, 1, 1, 512}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 7, 7, 2048}, // input size (NHWC)
{512, 1, 1, 2048}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
}
};
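// Illustrative usage sketch (not used by the unit tests; the channel size of 64
// is simply the constructor default): build the testbed and walk the default
// problem list. The loop body is a stand-in for whatever check a test performs.
inline void visit_default_conv2d_problems() {
  TestbedConv2dProblemSizes sizes(64 /*minimum_channel_size*/);
  for (cutlass::conv::Conv2dProblemSize const &problem : sizes.conv2d_default_sizes) {
    (void)problem;   // run / verify a convolution for this problem here
  }
}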
////////////////////////////////////////////////////////////////////////////
/// Structure TestbedGroupConv2dProblemSizes initializes and holds default grouped
/// convolution problem sizes along with sizes drawn from important networks
////////////////////////////////////////////////////////////////////////////
struct TestbedGroupConv2dProblemSizes {
//
// Data members
//
int threadblock_n;
int threadblock_k;
int minimum_channel_size;
Conv2dProblemVector default_single_group_sizes;
Conv2dProblemVector default_multiple_group_sizes;
//
// Methods
//
/// Default ctor
TestbedGroupConv2dProblemSizes(
int threadblock_n_,
int threadblock_k_,
int minimum_channel_size_ = 64)
: threadblock_n (threadblock_n_),
threadblock_k (threadblock_k_),
minimum_channel_size (minimum_channel_size_) {
initialize_group_conv2d_default_sizes();
filter_all();
}
/// Eliminates some illegal cases
void filter_all() {
Conv2dProblemVector *problems_vectors[] = {
&default_single_group_sizes,
&default_multiple_group_sizes
};
for (Conv2dProblemVector *problems : problems_vectors) {
Conv2dProblemVector filtered;
for (cutlass::conv::Conv2dProblemSize const & problem : *problems) {
if (!((problem.C / problem.groups) % minimum_channel_size)) {
filtered.push_back(problem);
}
}
*problems = filtered;
}
}
// Add a few standard convolution problem sizes
void initialize_group_conv2d_default_sizes() {
////////////////////////////////////////////////////////////////////////////////////
// One group calculated by one or multiple CTAs: k_per_group % CTA::N = 0
// One CTA calculates a single group
////////////////////////////////////////////////////////////////////////////////////
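    // Worked example (assuming threadblock_n = 128, threadblock_k = 32):
    // cta_per_group_k = 2 and groups = 3 give conv_k = 2 * 128 * 3 = 768, so
    // k_per_group = 256 and each group maps to exactly two CTAs along N.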
for (int cta_per_group_k = 1; cta_per_group_k < 4; ++cta_per_group_k) {
// groups = 2, 3, 4
for (int groups = 2; groups < 5; ++groups) {
int conv_k = cta_per_group_k * threadblock_n * groups;
default_single_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k * 2 * groups}, // input size (NHWC)
{conv_k, 3, 3, threadblock_k * 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
groups // groups
));
} // loop groups
} // loop cta_per_group_k
// Partial gemm_k: k_per_group == CTA::N && channels_per_group < CTA::K
default_single_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k}, // input size (NHWC)
{threadblock_n * 2, 3, 3, threadblock_k / 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
2 // groups
));
// Larger problem sizes
default_single_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 56, 56, 696}, // input size (NHWC)
{768, 3, 3, 232}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
3 // groups
));
default_single_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 14, 14, 1392}, // input size (NHWC)
{1536, 3, 3, 232}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
3 // groups
));
////////////////////////////////////////////////////////////////////////////////////
    // One CTA calculates multiple groups: CTA::N % k_per_group = 0
////////////////////////////////////////////////////////////////////////////////////
// 2 groups per CTA
default_multiple_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k * 4}, // input size (NHWC)
{threadblock_n, 3, 3, threadblock_k * 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
2 // groups
));
// 2 groups per CTA and partial gemm_k
default_multiple_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k}, // input size (NHWC)
{threadblock_n, 3, 3, threadblock_k / 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
2 // groups
));
// 4 groups per CTA
default_multiple_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k * 8}, // input size (NHWC)
{threadblock_n / 2, 3, 3, threadblock_k * 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
4 // groups
));
// 4 groups per CTA and partial gemm_k
default_multiple_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k * 2}, // input size (NHWC)
{threadblock_n / 2, 3, 3, threadblock_k / 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
4 // groups
));
}
};
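// Illustrative usage sketch (the threadblock extents 128x32 are assumptions for
// this example; the real tests pass the extents of the tile description under
// test):
inline void visit_group_conv2d_problems() {
  TestbedGroupConv2dProblemSizes sizes(128 /*threadblock_n*/, 32 /*threadblock_k*/);
  for (cutlass::conv::Conv2dProblemSize const &problem : sizes.default_single_group_sizes) {
    (void)problem;   // run / verify a grouped convolution for this problem here
  }
}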
} // namespace device
} // namespace conv
} // namespace test
| test/unit/conv/device/conv2d_problems.h/0 | {
"file_path": "test/unit/conv/device/conv2d_problems.h",
"repo_id": "test",
"token_count": 21819
} | 50 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM testbed
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "conv2d_problems.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/core_io.h"
#include "cutlass/util/tensor_view_io.h"
#include "../cache_testbed_output.h"
namespace test {
namespace conv {
namespace device {
template <typename Conv2d>
class TestbedConv2dWithReduction {
public:
using ElementA = typename Conv2d::ElementA;
using LayoutA = typename Conv2d::LayoutA;
using ElementB = typename Conv2d::ElementB;
using LayoutB = typename Conv2d::LayoutB;
using ElementC = typename Conv2d::ElementC;
using LayoutC = typename Conv2d::LayoutC;
using ElementAccumulator = typename Conv2d::ElementAccumulator;
using ElementCompute = typename Conv2d::ElementCompute;
using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp;
using ElementT = typename EpilogueOutputOp::ElementTensor;
static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator;
public:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementAccumulator, LayoutC> tensor_Reduction;
cutlass::HostTensor<ElementT, cutlass::layout::RowMajor> tensor_Tensor;
cutlass::HostTensor<ElementAccumulator, LayoutC> tensor_Final_Reduction;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
public:
TestbedConv2dWithReduction(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
int scope = 2;
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope, -scope, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
}
else {
}
}
void initialize(
cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) {
tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
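    // The partial-reduction workspace stores one partial sum per output channel (K) for each
    // threadblock tile along the GEMM M dimension, i.e. ceil(N*P*Q / ThreadblockShape::kM) rows.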
tensor_Reduction.resize({
1,
1,
(problem_size.N * problem_size.P * problem_size.Q - 1 + Conv2d::ThreadblockShape::kM) / Conv2d::ThreadblockShape::kM,
(problem_size.K)
});
tensor_Final_Reduction.resize({
1,
1,
1,
(problem_size.K)
});
tensor_Tensor.resize({(problem_size.N * problem_size.P * problem_size.Q), problem_size.K});
tensor_D_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_D_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
initialize_tensor(tensor_A.host_view(), init_A, seed);
initialize_tensor(tensor_B.host_view(), init_B, seed * 17);
initialize_tensor(tensor_C.host_view(), init_C, seed * 39);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
tensor_D_reference.sync_device();
}
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Conv2d::UnderlyingKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size,
cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
#if 0 //display conv2d problem size for debugging
std::cout << problem_size << std::endl
<< "alpha, beta: (" << alpha << ", " << beta << ")" << std::endl
<< "split_k_mode: " << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? "(serial)" : "(parallel)") << std::endl
<< std::endl;
#endif
initialize(problem_size);
// configure the operator
Conv2d conv2d_op;
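    // Beyond the usual implicit GEMM operands, the epilogue-with-reduction arguments carry
    // device pointers to the partial-reduction workspace and the auxiliary tensor output,
    // along with their leading strides.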
typename Conv2d::Arguments conv2d_args(
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_computed.device_ref(),
{alpha, beta},
split_k_mode,
tensor_Reduction.device_data(),
tensor_Tensor.device_data(),
static_cast<int>(tensor_Reduction.stride()[0]),
static_cast<int>(tensor_Tensor.stride()[0])
);
// find workspace requirement for parallel split-k reduction
size_t workspace_size = Conv2d::get_workspace_size(conv2d_args);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = conv2d_op.initialize(conv2d_args, workspace.get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
// conv2d operation with parallel split-k-mode
if (split_k_mode == cutlass::conv::SplitKMode::kParallel) {
// conv2d output is written to workspace in global memory
conv2d_args.ref_D.reset(reinterpret_cast<ElementC*>(workspace.get()));
// accumulate mma for each cta in k-dimension (1.0 * A * B)
conv2d_args.output_op = {ElementCompute(1), ElementCompute(0)};
// update conv2d operator arguments
status = conv2d_op.update(conv2d_args, workspace.get());
}
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
// run conv2d operator
status = conv2d_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
bool passed = false;
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " device reference error: "
<< cudaGetErrorString(result);
// Final reduction over the partial reduction tensor
using Functor = cutlass::plus<ElementAccumulator>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementAccumulator,
ElementAccumulator,
LayoutC,
Functor,
8,
ElementAccumulator
>;
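    // Reduce over mode 2 of the {1, 1, tiles_m, K} workspace so the per-tile partials of each
    // output channel collapse into a single value in tensor_Final_Reduction.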
TensorReduction reduction(tensor_Reduction.extent(), 2);
cutlass::DeviceAllocation<uint8_t> reduction_device_workspace(reduction.workspace_size());
status = reduction.reduce(
tensor_Final_Reduction.device_ref(),
tensor_Reduction.device_ref(),
reduction_device_workspace.get(),
ElementAccumulator());
EXPECT_EQ(status, cutlass::Status::kSuccess);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
//
// Reference check
//
tensor_D_computed.sync_host();
#if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED
cutlass::reference::device::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_reference.device_ref(),
alpha,
beta);
// sync host (copy device data to host) for dumping error output in case of mismatches
tensor_D_reference.sync_host();
#else
cutlass::reference::host::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.host_ref(),
tensor_B.host_ref(),
tensor_C.host_ref(),
tensor_D_reference.host_ref(),
alpha,
beta);
#endif
passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view());
EXPECT_TRUE(passed);
//
// Reference check on reduction results
//
tensor_Reduction.sync_host();
tensor_Final_Reduction.sync_host();
    // Compute the reference per-channel reduction from the reference output tensor
cutlass::HostTensor<ElementAccumulator, LayoutC> reference_Reduction;
reference_Reduction.resize({
1,
1,
1,
(problem_size.K)
});
for (int k = 0; k < problem_size.K; ++k) {
ElementAccumulator reduced_value = ElementAccumulator();
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
reduced_value += tensor_D_reference.at({n, p, q, k});
}
}
}
reference_Reduction.at({0, 0, 0, k}) = reduced_value;
}
passed = cutlass::reference::host::TensorEquals(
tensor_Final_Reduction.host_view(),
reference_Reduction.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Conv2d_ImplicitGemm_device_"
<< (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_")
<< (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" : "wgrad_"))
<< "nhwc_"
<< problem_size.N << "x"
<< problem_size.H << "x"
<< problem_size.W << "x"
<< problem_size.C
<< "_krsc_"
<< problem_size.K << "x"
<< problem_size.R << "x"
<< problem_size.S << "x"
<< problem_size.C
<< "_padding_"
<< problem_size.pad_h << "x"
<< problem_size.pad_w
<< "_stride_"
<< problem_size.stride_h << "x"
<< problem_size.stride_w
<< "_dilation_"
<< problem_size.dilation_h << "x"
<< problem_size.dilation_w << "_"
<< (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? "xcorr_" : "conv_")
<< Conv2d::ThreadblockShape::kM << "x"
<< Conv2d::ThreadblockShape::kN << "x"
<< Conv2d::ThreadblockShape::kK << "_"
<< Conv2d::WarpShape::kM << "x"
<< Conv2d::WarpShape::kN << "x"
<< Conv2d::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n"
<< "\nD reference:\n" << tensor_D_reference.host_view() << "\n"
<< "\nD computed:\n" << tensor_D_computed.host_view() << "\n"
<< "\nreduction reference:\n" << reference_Reduction.host_view() << "\n"
<< "\nreduction computed:\n" << tensor_Reduction.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// TestAllConv2dWithReduction: runs cutlass::conv::device::ImplicitGemmConvolution (with epilogue reduction) and compares it with reference
// TestAllConv2dWithReduction runs the conv operator on default conv problem sizes from test::conv::device::TestbedConv2dProblemSizes
// Additionally, each conv2d test can provide its own problem sizes (conv_test_sizes) and a blacklist of sizes
// (conv_blacklist_sizes)
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
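// A minimal usage sketch (hypothetical kernel alias); the concrete ImplicitGemm type normally
// comes from a device-level kernel definition in the individual test file:
//
//   TEST(SM80_Device_Conv2d_Fprop_With_Reduction_f16nhwc, 128x128_64x64x32) {
//     using ImplicitGemm = /* cutlass::conv::device::ImplicitGemmConvolution<...> built with an
//                             epilogue-with-reduction kernel */;
//     EXPECT_TRUE(test::conv::device::TestAllConv2dWithReduction<ImplicitGemm>());
//   }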
template <typename ImplicitGemm>
bool TestAllConv2dWithReduction(
const Conv2dProblemVector & conv_test_sizes = Conv2dProblemVector(),
const Conv2dProblemVector & conv_blacklist_sizes = Conv2dProblemVector()) {
bool passed = true;
//
// Testbed object
//
TestbedConv2dWithReduction<ImplicitGemm> testbed;
//
// Get conv problem sizes to run conv operator
//
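  // The constructor argument sets the minimum channel count (128 bits / sizeof_bits(ElementA),
  // i.e. the elements per 128-bit access) so the generated sizes meet the kernel's expected alignment.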
TestbedConv2dProblemSizes conv_problems(128/cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value);
// Vector of conv2d problem sizes to avoid duplicate runs
Conv2dProblemVector conv_tested_sizes;
Conv2dProblemVector const *problem_vectors[] = {
&conv_test_sizes, // run user specified sizes
&conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes
&conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
&conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled
#endif
};
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for (Conv2dProblemVector const * problem_vector : problem_vectors) {
// Run conv testbed on default convolution sizes
for(auto conv_problem : *problem_vector) {
// Skip blacklist and avoid duplicate problem sizes
if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() ||
std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) {
continue;
}
//
// Procedurally disable certain cases
//
      // CUTLASS DGRAD's *unity* stride specialization only supports stride {1, 1}
if ((ImplicitGemm::kConvolutionalOperator ==
cutlass::conv::Operator::kDgrad) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kUnity)) {
if (!((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
#if 0 // relax restrictions on analytic strided dgrad
      // CUTLASS DGRAD's *strided* specialization only supports strides >= {2, 2}
if ((ImplicitGemm::kConvolutionalOperator ==
cutlass::conv::Operator::kDgrad) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kStrided)) {
if (((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
#endif
//
// Test
//
// push back tested problem size to avoid re-running duplicates
conv_tested_sizes.push_back(conv_problem);
// test mode = xcross
passed = testbed.run(
conv_problem,
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
// test mode = convolution
passed = testbed.run(
conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
}
}
// CUTLASS DGRAD's *strided* specialization does not support split-k mode
if ((ImplicitGemm::kConvolutionalOperator ==
cutlass::conv::Operator::kDgrad) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kStrided)) {
passed = testbed.run(
cutlass::conv::Conv2dProblemSize(
{1, 56, 56, 8}, // input size (NHWC)
{8, 1, 1, 8}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1}), // dilation (dilation_h, dilation_w)
cutlass::conv::SplitKMode::kSerial,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0));
if (!passed) {
return false;
}
return passed;
}
  // Sweep split-k-slices using serial and parallel reduction with non-unity alpha and non-zero beta for
  // a single conv2d problem size. Convolution unit tests take a long time to run, so only sweep parameters
  // which are absolutely necessary to catch functional bugs. The code below provides the option to sweep
  // alpha and beta for local testing, but only runs one value for each.
cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
{1, 17, 11, 288}, // input size (NHWC)
{160, 3, 3, 288}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
);
// Parallel SplitK is not tested.
cutlass::conv::SplitKMode split_k_modes [] = {
cutlass::conv::SplitKMode::kSerial,
};
int split_k_slices[] = {
1, 2, 3, 4, 201
};
double problem_alpha[] = {
2.0
};
double problem_beta[] = {
2.0
};
for (auto split_k_mode : split_k_modes) {
for (auto split_k_slice : split_k_slices) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
passed = testbed.run(
conv2d_split_k_test_size.reset_split_k_slices(split_k_slice),
split_k_mode,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(alpha),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(beta));
if (!passed) {
return false;
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
| test/unit/conv/device/conv2d_with_reduction_testbed.h/0 | {
"file_path": "test/unit/conv/device/conv2d_with_reduction_testbed.h",
"repo_id": "test",
"token_count": 8620
} | 51 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief unit tests for matrix_coord
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace core {
void test_matrix_coord(cutlass::MatrixCoord::Index row, cutlass::MatrixCoord::Index column) {
cutlass::MatrixCoord matrix_coord(row, column);
EXPECT_EQ(matrix_coord.row(), row);
EXPECT_EQ(matrix_coord.column(), column);
}
void test_matrix_coord_operator_addition() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
auto matrix_coord_c = matrix_coord_a + matrix_coord_b;
EXPECT_EQ(matrix_coord_c.row(), row_a + row_b);
EXPECT_EQ(matrix_coord_c.column(), column_a + column_b);
}
void test_matrix_coord_operator_subtraction() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
auto matrix_coord_c = matrix_coord_a - matrix_coord_b;
EXPECT_EQ(matrix_coord_c.row(), row_a - row_b);
EXPECT_EQ(matrix_coord_c.column(), column_a - column_b);
}
void test_matrix_coord_operator_multiply() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
auto matrix_coord_c = matrix_coord_a * matrix_coord_b;
EXPECT_EQ(matrix_coord_c.row(), row_a * row_b);
EXPECT_EQ(matrix_coord_c.column(), column_a * column_b);
}
void test_matrix_coord_operator_division() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
auto matrix_coord_c = matrix_coord_a / matrix_coord_b;
EXPECT_EQ(matrix_coord_c.row(), row_a / row_b);
EXPECT_EQ(matrix_coord_c.column(), column_a / column_b);
}
void test_matrix_coord_operator_addition_assignment() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
matrix_coord_a += matrix_coord_b;
EXPECT_EQ(matrix_coord_a.row(), row_a + row_b);
EXPECT_EQ(matrix_coord_a.column(), column_a + column_b);
}
void test_matrix_coord_operator_subtraction_assignment() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
matrix_coord_a -= matrix_coord_b;
EXPECT_EQ(matrix_coord_a.row(), row_a - row_b);
EXPECT_EQ(matrix_coord_a.column(), column_a - column_b);
}
void test_matrix_coord_operator_multiply_assignment() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
matrix_coord_a *= matrix_coord_b;
EXPECT_EQ(matrix_coord_a.row(), row_a * row_b);
EXPECT_EQ(matrix_coord_a.column(), column_a * column_b);
}
void test_matrix_coord_operator_division_assignment() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
matrix_coord_a /= matrix_coord_b;
EXPECT_EQ(matrix_coord_a.row(), row_a / row_b);
EXPECT_EQ(matrix_coord_a.column(), column_a / column_b);
}
} // namespace core
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_row12_column24) {
cutlass::MatrixCoord::Index row = 12;
cutlass::MatrixCoord::Index column = 24;
test::core::test_matrix_coord(row, column);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_addition) {
test::core::test_matrix_coord_operator_addition();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_subtraction) {
test::core::test_matrix_coord_operator_subtraction();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_multiply) {
test::core::test_matrix_coord_operator_multiply();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_division) {
test::core::test_matrix_coord_operator_division();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_addition_assignment) {
test::core::test_matrix_coord_operator_addition_assignment();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_subtraction_assignment) {
test::core::test_matrix_coord_operator_subtraction_assignment();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_multiply_assignment) {
test::core::test_matrix_coord_operator_multiply_assignment();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_division_assignment) {
test::core::test_matrix_coord_operator_division_assignment();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/core/matrix_coord.cu/0 | {
"file_path": "test/unit/core/matrix_coord.cu",
"repo_id": "test",
"token_count": 2790
} | 52 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass_unit_test.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
using namespace cute;
template<class ALayout,
class BLayout,
class CLayout,
class SMemALayout,
class SMemBLayout,
class SMemCLayout,
class SmemCopyOpA,
class SmemCopyOpB,
class SmemCopyOpC,
uint32_t ThreadBlockSize,
class TiledMma,
uint32_t CopyMaxVecBits,
class TA,
class TB,
class TC,
class Alpha,
class Beta,
class ALoadTransform,
class BLoadTransform,
class CLoadTransform,
class CStoreTransform>
__launch_bounds__(ThreadBlockSize) __global__ void
cooperative_gemm_kernel(TA const* a,
TB const* b,
TC* c,
TC* c_out,
Alpha const alpha,
Beta const beta,
ALoadTransform a_load_transform,
BLoadTransform b_load_transform,
CLoadTransform c_load_transform,
CStoreTransform c_store_transform)
{
using namespace cute;
Tensor g_a_tensor = make_tensor(make_gmem_ptr(a), ALayout{});
Tensor g_b_tensor = make_tensor(make_gmem_ptr(b), BLayout{});
Tensor g_c_tensor = make_tensor(make_gmem_ptr(c), CLayout{});
Tensor g_c_out_tensor = make_tensor(make_gmem_ptr(c_out), CLayout{});
constexpr uint32_t copy_max_vec_bytes = CopyMaxVecBits / 8;
extern __shared__ float4 smem_buf[];
auto* smem_ptr = reinterpret_cast<unsigned char*>(smem_buf);
auto* smem_ptr_a = smem_ptr;
auto* smem_ptr_b = smem_ptr_a + round_up((sizeof(TA) * cosize(SMemALayout {})), copy_max_vec_bytes);
auto* smem_ptr_c = smem_ptr_b + round_up((sizeof(TB) * cosize(SMemBLayout {})), copy_max_vec_bytes);
Tensor s_a_tensor = make_tensor(make_smem_ptr<TA>(smem_ptr_a), SMemALayout{});
Tensor s_b_tensor = make_tensor(make_smem_ptr<TB>(smem_ptr_b), SMemBLayout{});
Tensor s_c_tensor = make_tensor(make_smem_ptr<TC>(smem_ptr_c), SMemCLayout{});
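  // Cooperatively stage the A, B, and C tiles from global to shared memory using all
  // ThreadBlockSize threads, vectorized up to CopyMaxVecBits per access.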
cooperative_copy<ThreadBlockSize, CopyMaxVecBits>(threadIdx.x, g_a_tensor, s_a_tensor);
cooperative_copy<ThreadBlockSize, CopyMaxVecBits>(threadIdx.x, g_b_tensor, s_b_tensor);
cooperative_copy<ThreadBlockSize, CopyMaxVecBits>(threadIdx.x, g_c_tensor, s_c_tensor);
cp_async_fence();
cp_async_wait<0>();
__syncthreads();
TiledMma tiled_mma;
cooperative_gemm<SmemCopyOpA, SmemCopyOpB, SmemCopyOpC>(
threadIdx.x, tiled_mma,
alpha, s_a_tensor, s_b_tensor, beta, s_c_tensor,
a_load_transform, b_load_transform, c_load_transform, c_store_transform
);
__syncthreads();
cooperative_copy<ThreadBlockSize, CopyMaxVecBits>(threadIdx.x, s_c_tensor, g_c_out_tensor);
}
template<class ALayout, // logical shape (M, K)
class BLayout, // logical shape (N, K)
class CLayout, // logical shape (M, N)
class SMemALayout, // logical shape (M, K)
class SMemBLayout, // logical shape (N, K)
class SMemCLayout, // logical shape (M, N)
class SmemCopyOpA,
class SmemCopyOpB,
class SmemCopyOpC,
uint32_t ThreadBlockSize,
class TiledMma,
uint32_t CopyMaxVecBits,
class TA,
class TB,
class TC,
class ALoadTransform = cute::identity,
class BLoadTransform = cute::identity,
class CLoadTransform = cute::identity,
class CStoreTransform = cute::identity>
void test_cooperative_gemm(ALoadTransform const& a_load_transform = {},
BLoadTransform const& b_load_transform = {},
CLoadTransform const& c_load_transform = {},
CStoreTransform const& c_store_transform = {})
{
using gmem_a_layout_t = ALayout;
using gmem_b_layout_t = BLayout;
using gmem_c_layout_t = CLayout;
using smem_a_layout_t = SMemALayout;
using smem_b_layout_t = SMemBLayout;
using smem_c_layout_t = SMemCLayout;
static_assert(size<0>(gmem_a_layout_t{}) == size<0>(gmem_c_layout_t{})); // AM == CM
static_assert(size<0>(gmem_b_layout_t{}) == size<1>(gmem_c_layout_t{})); // BN == CN
static_assert(size<1>(gmem_a_layout_t{}) == size<1>(gmem_b_layout_t{})); // AK == BK
static_assert(size<0>(smem_a_layout_t{}) == size<0>(smem_c_layout_t{})); // AM == CM
static_assert(size<0>(smem_b_layout_t{}) == size<1>(smem_c_layout_t{})); // BN == CN
static_assert(size<1>(smem_a_layout_t{}) == size<1>(smem_b_layout_t{})); // AK == BK
static_assert(cute::size(gmem_a_layout_t {}) == cute::size(smem_a_layout_t {}));
static_assert(cute::size(gmem_b_layout_t {}) == cute::size(smem_b_layout_t {}));
static_assert(cute::size(gmem_c_layout_t {}) == cute::size(smem_c_layout_t {}));
#if 0
  print("  gmem a: "); print(gmem_a_layout_t{}); print("\n");
  print("  gmem b: "); print(gmem_b_layout_t{}); print("\n");
  print("  gmem c: "); print(gmem_c_layout_t{}); print("\n");
  print("  smem a: "); print(smem_a_layout_t{}); print("\n");
  print("  smem b: "); print(smem_b_layout_t{}); print("\n");
  print("  smem c: "); print(smem_c_layout_t{}); print("\n");
  print("  threads: "); print(ThreadBlockSize); print("\n");
#endif
const auto alpha = static_cast<TC>(1.1);
const auto beta = static_cast<TC>(1.2);
thrust::host_vector<TA> h_a(cosize(gmem_a_layout_t{}));
thrust::host_vector<TB> h_b(cosize(gmem_b_layout_t{}));
thrust::host_vector<TC> h_c(cosize(gmem_c_layout_t{}));
thrust::host_vector<TC> h_c_out(cosize(gmem_c_layout_t{}));
auto h_a_tensor = make_tensor(h_a.data(), gmem_a_layout_t{});
auto h_b_tensor = make_tensor(h_b.data(), gmem_b_layout_t{});
auto h_c_tensor = make_tensor(h_c.data(), gmem_c_layout_t{});
size_t max_size = std::max<size_t>({static_cast<size_t>(size(gmem_a_layout_t {})),
static_cast<size_t>(size(gmem_b_layout_t {})),
static_cast<size_t>(size(gmem_c_layout_t {}))});
for (size_t i = 0; i < max_size; ++i) {
double di = static_cast<double>(i);
if(i < size(gmem_a_layout_t{})) {
h_a_tensor(i) = static_cast<TA>(di / size(gmem_a_layout_t{}));
}
if(i < size(gmem_b_layout_t{})) {
      h_b_tensor(i) = static_cast<TB>(di / size(gmem_b_layout_t{}));
}
if(i < size(gmem_c_layout_t{})) {
h_c_tensor(i) = static_cast<TC>((di*di) / size(gmem_a_layout_t{}));
}
}
thrust::device_vector<TA> d_a(h_a);
thrust::device_vector<TB> d_b(h_b);
thrust::device_vector<TC> d_c(h_c);
thrust::device_vector<TC> d_c_out(h_c_out.size(), TC(float(-1)));
const size_t shared_memory_size =
(sizeof(TA) * h_a.size()) + (sizeof(TB) * h_b.size()) + (sizeof(TC) * h_c.size());
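  // The kernel stages the A, B, and C tiles back-to-back in this dynamic shared memory
  // allocation, aligning each sub-buffer up to CopyMaxVecBits/8 bytes; the layouts used by
  // these tests are assumed to keep the unpadded sum above sufficient.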
auto kernel = cooperative_gemm_kernel<
gmem_a_layout_t, gmem_b_layout_t, gmem_c_layout_t,
smem_a_layout_t, smem_b_layout_t, smem_c_layout_t,
SmemCopyOpA, SmemCopyOpB, SmemCopyOpC,
ThreadBlockSize, TiledMma, CopyMaxVecBits,
TA, TB, TC, decltype(alpha), decltype(beta),
ALoadTransform, BLoadTransform, CLoadTransform, CStoreTransform
>;
ASSERT_EQ(cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, static_cast<int>(shared_memory_size)), 0);
kernel<<<1, ThreadBlockSize, shared_memory_size>>>(
thrust::raw_pointer_cast(d_a.data()),
thrust::raw_pointer_cast(d_b.data()),
thrust::raw_pointer_cast(d_c.data()),
thrust::raw_pointer_cast(d_c_out.data()),
alpha,
beta,
a_load_transform,
b_load_transform,
c_load_transform,
c_store_transform
);
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
cudaError_t error = cudaGetLastError();
FAIL() << "Error at kernel sync: " << cudaGetErrorString(error) << "\n";
}
thrust::host_vector<TC> h_c_ref(h_c.size(), static_cast<TC>(0.0));
auto h_c_ref_tensor = make_tensor(h_c_ref.data(), gmem_c_layout_t{});
  // Accumulate A * B (in double precision) into the reference output
for (int k = 0; k < size<1>(h_a_tensor); k++) {
for (int m = 0; m < size<0>(h_a_tensor); m++) {
for (int n = 0; n < size<0>(h_b_tensor); n++) {
const auto a_value = a_load_transform(h_a_tensor(m, k));
const auto b_value = b_load_transform(h_b_tensor(n, k));
const auto a_value_fp64 = static_cast<double>(a_value);
const auto b_value_fp64 = static_cast<double>(b_value);
h_c_ref_tensor(m, n) += static_cast<TC>(a_value_fp64 * b_value_fp64);
}
}
}
  // D = alpha * (A * B) + beta * C, with the load/store transforms applied
for (int i = 0; i < size(h_c_ref_tensor); i++) {
const auto ab_value_fp64 = static_cast<double>(h_c_ref_tensor(i));
const auto c_value_fp64 = static_cast<double>(c_load_transform(h_c_tensor(i)));
h_c_ref_tensor(i) = c_store_transform(static_cast<TC>(alpha * ab_value_fp64 + beta * c_value_fp64));
}
h_c_out = d_c_out;
auto h_c_out_tensor = make_tensor(h_c_out.data(), gmem_c_layout_t{});
for (int i = 0; i < size(h_c_ref_tensor); i++) {
double h_c_ref_i = h_c_ref_tensor(i);
double h_c_out_i = h_c_out_tensor(i);
double epsilon(0.1f);
double nonzero_floor(std::numeric_limits<double>::min());
bool passed = cutlass::relatively_equal(h_c_out_i, h_c_ref_i, epsilon, nonzero_floor);
ASSERT_TRUE(passed) << i << " - result:" << h_c_out_i << " expected:" << h_c_ref_i;
}
}
template<uint32_t M,
uint32_t N,
uint32_t K,
uint32_t ThreadBlockSize,
class TiledMMAType,
uint32_t CopyMaxVecBits,
class TA,
class TB,
class TC,
class ALoadTransform = cute::identity,
class BLoadTransform = cute::identity,
class CLoadTransform = cute::identity,
class CStoreTransform = cute::identity>
void test_cooperative_gemm_col_major_layout(ALoadTransform const& a_load_transform = {},
BLoadTransform const& b_load_transform = {},
CLoadTransform const& c_load_transform = {},
CStoreTransform const& c_store_transform = {})
{
using gmem_a_layout_t = decltype(make_layout(make_shape(Int<M> {}, Int<K> {})));
using gmem_b_layout_t = decltype(make_layout(make_shape(Int<N> {}, Int<K> {}), GenRowMajor{}));
using gmem_c_layout_t = decltype(make_layout(make_shape(Int<M> {}, Int<N> {})));
using smem_a_layout_t = decltype(make_layout(make_shape(Int<M> {}, Int<K> {})));
using smem_b_layout_t = decltype(make_layout(make_shape(Int<N> {}, Int<K> {}), GenRowMajor{}));
using smem_c_layout_t = decltype(make_layout(make_shape(Int<M> {}, Int<N> {})));
test_cooperative_gemm<gmem_a_layout_t,
gmem_b_layout_t,
gmem_c_layout_t,
smem_a_layout_t,
smem_b_layout_t,
smem_c_layout_t,
AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<TA>>,
AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<TB>>,
AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<TC>>,
ThreadBlockSize,
TiledMMAType,
CopyMaxVecBits,
TA,
TB,
TC>(a_load_transform, b_load_transform, c_load_transform, c_store_transform);
}
template<uint32_t M,
uint32_t N,
uint32_t K,
uint32_t ThreadBlockSize,
class TiledMMAType,
class T,
class ALoadTransform = cute::identity,
class BLoadTransform = cute::identity,
class CLoadTransform = cute::identity,
class CStoreTransform = cute::identity>
void test_cooperative_gemm_col_major_layout(ALoadTransform const& a_load_transform = {},
BLoadTransform const& b_load_transform = {},
CLoadTransform const& c_load_transform = {},
CStoreTransform const& c_store_transform = {})
{
test_cooperative_gemm_col_major_layout<M, N, K, ThreadBlockSize, TiledMMAType, cute::sizeof_bits_v<T>, T, T, T>(
a_load_transform, b_load_transform, c_load_transform, c_store_transform);
}
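// A minimal usage sketch (hypothetical MMA atom and tile sizes): a 64x64x64 fp16 problem run
// by 128 threads with an SM80 tensor-core tiled MMA. Real tests pick their own atoms, layouts,
// and thread counts:
//
//   using TiledMMA = decltype(make_tiled_mma(SM80_16x8x16_F16F16F16F16_TN{},
//                                            Layout<Shape<_2, _2, _1>>{}));
//   test_cooperative_gemm_col_major_layout<64, 64, 64, 128, TiledMMA, cute::half_t>();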
template<class SMemAAtomLayout,
class SMemBAtomLayout,
class SMemCAtomLayout,
uint32_t M,
uint32_t N,
uint32_t K,
uint32_t ThreadBlockSize,
class TiledMMAType,
uint32_t CopyMaxVecBits,
class TA,
class TB,
class TC,
class ALoadTransform = cute::identity,
class BLoadTransform = cute::identity,
class CLoadTransform = cute::identity,
class CStoreTransform = cute::identity>
void test_cooperative_gemm_col_major_layout(ALoadTransform const& a_load_transform = {},
BLoadTransform const& b_load_transform = {},
CLoadTransform const& c_load_transform = {},
CStoreTransform const& c_store_transform = {})
{
using gmem_a_layout_t = decltype(make_layout(make_shape(Int<M> {}, Int<K> {})));
using gmem_b_layout_t = decltype(make_layout(make_shape(Int<N> {}, Int<K> {}), GenRowMajor{}));
using gmem_c_layout_t = decltype(make_layout(make_shape(Int<M> {}, Int<N> {})));
using smem_a_atom_layout_t = SMemAAtomLayout;
using smem_a_layout_t = decltype(tile_to_shape(
smem_a_atom_layout_t{},
make_shape(shape<0>(gmem_a_layout_t{}), shape<1>(gmem_a_layout_t{})))
);
using smem_b_atom_layout_t = SMemBAtomLayout;
using smem_b_layout_t = decltype(tile_to_shape(
smem_b_atom_layout_t{},
make_shape(shape<0>(gmem_b_layout_t{}), shape<1>(gmem_b_layout_t{})))
);
using smem_c_atom_layout_t = SMemCAtomLayout;
using smem_c_layout_t = decltype(tile_to_shape(
smem_c_atom_layout_t{},
make_shape(shape<0>(gmem_c_layout_t{}), shape<1>(gmem_c_layout_t{})))
);
test_cooperative_gemm<gmem_a_layout_t,
gmem_b_layout_t,
gmem_c_layout_t,
smem_a_layout_t,
smem_b_layout_t,
smem_c_layout_t,
AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<TA>>,
AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<TB>>,
AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<TC>>,
ThreadBlockSize,
TiledMMAType,
CopyMaxVecBits,
TA,
TB,
TC>(a_load_transform, b_load_transform, c_load_transform, c_store_transform);
}
template<class SMemAAtomLayout,
class SMemBAtomLayout,
class SMemCAtomLayout,
uint32_t M,
uint32_t N,
uint32_t K,
uint32_t ThreadBlockSize,
class TiledMMAType,
class T,
class ALoadTransform = cute::identity,
class BLoadTransform = cute::identity,
class CLoadTransform = cute::identity,
class CStoreTransform = cute::identity>
void test_cooperative_gemm_col_major_layout(ALoadTransform const& a_load_transform = {},
BLoadTransform const& b_load_transform = {},
CLoadTransform const& c_load_transform = {},
CStoreTransform const& c_store_transform = {})
{
test_cooperative_gemm_col_major_layout<SMemAAtomLayout,
SMemBAtomLayout,
SMemCAtomLayout,
M,
N,
K,
ThreadBlockSize,
TiledMMAType,
cute::sizeof_bits_v<T>,
T,
T,
T>(a_load_transform, b_load_transform, c_load_transform, c_store_transform);
}
| test/unit/cute/cooperative_gemm_common.hpp/0 | {
"file_path": "test/unit/cute/cooperative_gemm_common.hpp",
"repo_id": "test",
"token_count": 8956
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cute/numeric/integral_constant.hpp>
#include <cute/numeric/math.hpp>
#include <cute/util/type_traits.hpp>
// If cute::gcd returns auto instead of common_type_t<T, U>,
// then GCC 7.5 reports the following error:
//
// ... /include/cute/numeric/math.hpp:103:26: error:
// inconsistent deduction for auto return type: ‘int’ and then ‘bool’
// if (u == 0) { return t; }
// ^
// Note that common_type_t<C<42>, C<1>>::value_type might still be bool.
TEST(CuTe_core, gcd_returns_common_type)
{
using cute::C;
constexpr auto fifteen = C<3 * 5>{};
static_assert(cute::is_same_v<decltype(fifteen)::value_type, int>);
static_assert(int(fifteen) == 15);
constexpr auto forty_two = C<2 * 3 * 7>{};
static_assert(cute::is_same_v<decltype(forty_two)::value_type, int>);
static_assert(int(forty_two) == 42);
// C<1>::value_type (as well as C<0>::value_type) may be bool.
constexpr auto one = C<1>{};
// Both inputs have value_type int.
{
constexpr auto result = cute::gcd(fifteen, forty_two);
static_assert(cute::is_same_v<decltype(result)::value_type, int>);
static_assert(int(result) == 3);
}
// One input has value_type int, and the other may have value_type bool.
{
constexpr auto result = cute::gcd(one, forty_two);
static_assert(int(result) == 1);
}
{
constexpr auto result = cute::gcd(forty_two, one);
static_assert(int(result) == 1);
}
// Both inputs may have value_type bool.
{
constexpr auto result = cute::gcd(one, one);
static_assert(int(result) == 1);
}
}
TEST(CuTe_core, lcm_returns_common_type)
{
using cute::C;
constexpr auto six = C<2 * 3>{};
static_assert(cute::is_same_v<decltype(six)::value_type, int>);
static_assert(int(six) == 6);
constexpr auto fifteen = C<3 * 5>{};
static_assert(cute::is_same_v<decltype(fifteen)::value_type, int>);
static_assert(int(fifteen) == 15);
// C<1>::value_type (as well as C<0>::value_type) may be bool.
constexpr auto one = C<1>{};
// Both inputs have value_type int.
{
constexpr auto result = cute::lcm(six, fifteen);
static_assert(cute::is_same_v<decltype(result)::value_type, int>);
static_assert(int(result) == 30);
}
// One input has value_type int, and the other may have value_type bool.
{
constexpr auto result = cute::lcm(one, six);
static_assert(cute::is_same_v<decltype(result)::value_type, int>);
static_assert(int(result) == 6);
}
{
constexpr auto result = cute::lcm(six, one);
static_assert(cute::is_same_v<decltype(result)::value_type, int>);
static_assert(int(result) == 6);
}
// Both inputs may have value_type bool.
{
constexpr auto result = cute::lcm(one, one);
static_assert(int(result) == 1);
}
}
| test/unit/cute/core/math.cpp/0 | {
"file_path": "test/unit/cute/core/math.cpp",
"repo_id": "test",
"token_count": 1595
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass_unit_test.h"
#include <iostream>
#include <cstdint>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
namespace cutlass::test {
template <class ElementType, class SmemLayout>
struct SharedStorage
{
cute::ArrayEngine<ElementType, cute::cosize_v<SmemLayout>> smem;
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
template <class T, class TiledCopy, class CTA_Tiler, class GmemLayout, class SmemLayout>
__global__ void
tma_test_device_cute(T const* g_in, T* g_out,
CUTE_GRID_CONSTANT TiledCopy const tma, CTA_Tiler cta_tiler,
GmemLayout gmem_layout, SmemLayout smem_layout)
{
using namespace cute;
CUTE_STATIC_ASSERT_V(product_each(shape(cta_tiler)) == product_each(shape(smem_layout)));
// Use Shared Storage structure to allocate and distribute aligned SMEM addresses
extern __shared__ char shared_memory[];
using SharedStorage = SharedStorage<T, SmemLayout>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
// Construct SMEM tensor
Tensor sB = make_tensor(make_smem_ptr(shared_storage.smem.begin()), smem_layout); // (CTA_TILE_M,CTA_TILE_N,...)
// TMA requires special handling of strides to deal with coord codomain mapping
// Represent the full tensors -- get these from TMA
Tensor mA = make_tensor(make_gmem_ptr<T>(g_in), gmem_layout);
Tensor mB = tma.get_tma_tensor(shape(gmem_layout));
constexpr int R = rank_v<CTA_Tiler>;
Tensor gA = flat_divide(mA, cta_tiler); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...)
Tensor gB = flat_divide(mB, cta_tiler); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...)
//
// Prepare the TMA_STORE
//
auto cta_tma = tma.get_slice(Int<0>{}); // CTA slice
Tensor tBsB_x = cta_tma.partition_S(sB); // (TMA,TMA_M,TMA_N)
Tensor tBgB_x = cta_tma.partition_D(gB); // (TMA,TMA_M,TMA_N,REST_M,REST_N)
#if 0
if (thread0()) {
print(tma);
print("TILE : "); print(cta_tiler); print("\n");
print(" mB : "); print( mB.data()); print(" o "); print( mB.layout()); print("\n");
print(" gB : "); print( gB.data()); print(" o "); print( gB.layout()); print("\n");
print("tBgB_x: "); print(tBgB_x.data()); print(" o "); print(tBgB_x.layout()); print("\n");
print(" sB : "); print( sB.data()); print(" o "); print( sB.layout()); print("\n");
print("tBsB_x: "); print(tBsB_x.data()); print(" o "); print(tBsB_x.layout()); print("\n");
}
#endif
//
// Perform the TMA_STORE
//
// INPUT: Group the CTA_TILE_X modes and REST_X modes for input
Tensor tAgA = group_modes<0,R>(group_modes<R,rank(gA)>(gA)); // (CTA_TILE, REST)
// OUTPUT: Group the REST_X modes and the TMA_X modes to easily iterate through the tiles
Tensor tBgB = group_modes<1,rank(tBgB_x)>(tBgB_x); // (TMA,REST)
Tensor tBsB = group_modes<1,rank(tBsB_x)>(tBsB_x); // (TMA,REST)
static_assert(size<1>(tBsB) == 1);
#if 0
if (thread0()) {
print("tAgA : "); print(tAgA.data()); print(" o "); print(tAgA.layout()); print("\n");
print("tBsB : "); print(tBsB.data()); print(" o "); print(tBsB.layout()); print("\n");
print("tBgB : "); print(tBgB.data()); print(" o "); print(tBgB.layout()); print("\n");
}
#endif
// Loop over the TMA stages, using smem as our buffer
for (int stage = 0; stage < size<1>(tBgB); ++stage)
{
//
// Read in trivially gmem -> smem
//
// Subbyte elements could cause race conditions, so be even more conservative
if (thread0()) {
copy(tAgA(_,stage), sB);
}
__syncthreads();
cute::cp_async_wait<0>();
//
// Perform the TMA_STORE
//
if (threadIdx.x == 0) {
copy(tma, tBsB(_,0), tBgB(_,stage));
}
tma_store_wait<0>();
__syncthreads();
}
}
template <class T, class TmaType = T, class CopyOp, class GMEM_Layout, class SMEM_Layout, class CTA_Tile>
void
test_tma_store(CopyOp const& copy_op,
GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout,
CTA_Tile const& cta_tile)
{
using namespace cute;
// Allocate and initialize host test data
size_t N = ceil_div(cosize(gmem_layout) * sizeof_bits<T>::value, 8);
thrust::host_vector<uint8_t> h_in(N);
for (size_t i = 0; i < h_in.size(); ++i) {
h_in[i] = uint8_t(i % 13);
}
Tensor hA_in = make_tensor(recast_ptr<T>(h_in.data()), gmem_layout);
// Allocate and initialize device test data
thrust::device_vector<uint8_t> d_in = h_in;
  thrust::device_vector<uint8_t> d_out(h_in.size(), uint8_t(-1)); // fill with 0xFF so untouched bytes are detectable
// Create TMA for this device Tensor
Tensor gA = make_tensor(make_gmem_ptr<T>(raw_pointer_cast(d_out.data())), gmem_layout);
auto tma = make_tma_copy<TmaType>(copy_op, gA, smem_layout, cta_tile, Int<1>{});
//print(tma);
// Launch
int smem_size = int(sizeof(SharedStorage<T, decltype(smem_layout)>));
tma_test_device_cute<<<1, 128, smem_size>>>(
reinterpret_cast<T const*>(raw_pointer_cast(d_in.data())),
reinterpret_cast<T*> (raw_pointer_cast(d_out.data())),
tma, cta_tile,
gmem_layout,
smem_layout);
// Copy results back to host
thrust::host_vector<uint8_t> h_out = d_out;
Tensor hA_out = make_tensor(recast_ptr<T>(h_out.data()), gmem_layout);
// Validate the results. Print only the first 3 errors.
int count = 3;
for (int i = 0; i < int(size(hA_out)) && count > 0; ++i) {
EXPECT_EQ(hA_in(i), hA_out(i));
if (hA_in(i) != hA_out(i)) {
--count;
}
}
}
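// A minimal usage sketch (hypothetical element type and shapes); real tests supply their own
// copy op, layouts, and CTA tile:
//
//   auto gmem_layout = make_layout(make_shape(Int<128>{}, Int<128>{}), GenColMajor{});
//   auto smem_layout = make_layout(make_shape(Int< 64>{}, Int< 64>{}), GenColMajor{});
//   test_tma_store<cute::half_t>(SM90_TMA_STORE{}, gmem_layout, smem_layout,
//                                make_shape(Int<64>{}, Int<64>{}));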
#endif
} // end namespace cutlass::test
| test/unit/cute/hopper/tma_store_testbed.hpp/0 | {
"file_path": "test/unit/cute/hopper/tma_store_testbed.hpp",
"repo_id": "test",
"token_count": 3062
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "testbed.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_64x64_64x64x32) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::int4b_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 32 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_64x64_32x32x32) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::int4b_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 32 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_128x128_64x64x32) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::int4b_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_128x64_64x32x32) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::int4b_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 32 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_64x128_32x64x32) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::int4b_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_32x128_32x64x32) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::int4b_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_128x32_64x32x32) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::int4b_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 32 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_256x128_64x64x32) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::int4b_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<256, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_128x256_64x64x32) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::int4b_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 32 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 256, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
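//
// A similar set of tile configurations, repeated with int8_t outputs and k = 16 instruction shapes.
//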
TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_64x64_64x64x16) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = int8_t;
using ElementAccumulator = int;
using ElementCompute = float;
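  // 128-bit accesses of 8-bit outputs: 128 / 8 = 16 elements per access.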
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_64x64_32x32x16) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = int8_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_128x128_64x64x16) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = int8_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_64x128_64x64x16) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = int8_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
  using Shape = cutlass::gemm::GemmShape<64, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_128x64_64x32x16) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = int8_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_64x128_32x64x16) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = int8_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_32x128_32x64x16) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = int8_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_128x32_64x32x16) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = int8_t;
using ElementAccumulator = int;
using ElementCompute = float;
int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using Element = ElementOutput;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
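//
// The tests below use half_t operands with the congruous Tensor Op layouts and
// accumulate and store in float.
//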
TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_64x64_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_128x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_128x256_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_256x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<256, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_32x32_32x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_64x64_32x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_64x128_32x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_128x64_64x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Mixed precision tests
//
/////////////////////////////////////////////////////////////////////////////////////////////////
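// These "mixed" tests accumulate in float but store half_t outputs.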
TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_64x64_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_128x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_128x256_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_256x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<256, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_32x32_32x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_64x64_32x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_64x128_32x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_128x64_64x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// F16 accumulation
//
/////////////////////////////////////////////////////////////////////////////////////////////////
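// These tests keep half_t for the accumulator, the compute type, and the output.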
TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_64x64_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_128x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_128x256_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_256x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<256, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_32x32_32x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_64x64_32x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_64x128_32x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_128x64_64x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Epilogue_threadblock_epilogue, f64_tensor_op_64x64_32x32x4) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = double;
using ElementAccumulator = double;
using ElementCompute = double;
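// Double-precision output uses scalar (one 64-bit element) accesses in these tests.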
int const kElementsPerAccess = 1;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Epilogue_threadblock_epilogue, f64_tensor_op_128x64_64x32x4) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = double;
using ElementAccumulator = double;
using ElementCompute = double;
int const kElementsPerAccess = 1;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Epilogue_threadblock_epilogue, f64_tensor_op_64x128_32x64x4) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = double;
using ElementAccumulator = double;
using ElementCompute = double;
int const kElementsPerAccess = 1;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Epilogue_threadblock_epilogue, f64_tensor_op_128x128_32x64x4) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = double;
using ElementAccumulator = double;
using ElementCompute = double;
int const kElementsPerAccess = 1;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, vec1_mixed_f16_f32_tensor_op_128x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 1;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, vec1_mixed_f16_f32_tensor_op_128x256_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 1;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, vec1_tensor_op_128x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 1;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_threadblock_epilogue, vec1_tensor_op_128x256_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 1;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/threadblock/epilogue_tensor_op.cu/0 | {
"file_path": "test/unit/epilogue/threadblock/epilogue_tensor_op.cu",
"repo_id": "test",
"token_count": 30192
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMV interface
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/gemv.h"
#include "cutlass/gemm/device/gemv.h"
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "testbed_utils.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
template <typename Gemv>
class TestbedGemv {
public:
using ElementA = typename Gemv::ElementA;
using LayoutA = typename Gemv::LayoutA;
using ElementB = typename Gemv::ElementB;
using ElementC = typename Gemv::ElementC;
using ElementAccumulator = typename Gemv::ElementAccumulator;
using ElementCompute = typename Gemv::EpilogueOutputOp::ElementCompute;
using LayoutV = cutlass::layout::RowMajor;
private:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutV> tensor_B;
cutlass::HostTensor<ElementC, LayoutV> tensor_C;
cutlass::HostTensor<ElementC, LayoutV> tensor_D;
cutlass::HostTensor<ElementC, LayoutV> reference_D;
public:
//
// Methods
//
TestbedGemv(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2023
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemv::ElementC>::value;
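// Narrower types get a tighter value range so host and device results can be compared bit-for-bit.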
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(
cutlass::MatrixCoord problem_size,
int32_t batch_count
) {
//
// Allocate the GEMV workspace
//
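// Batches of A are appended along the column dimension for column-major A and along the
// row dimension for row-major A; B, C, and D hold one vector per batch, stacked end-to-end.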
if(std::is_same<LayoutA, cutlass::layout::ColumnMajor>::value) {
tensor_A.resize({problem_size.row(), batch_count * problem_size.column()});
}
else {
tensor_A.resize({batch_count * problem_size.row(), problem_size.column()});
}
tensor_B.resize({batch_count * problem_size.column(), 1});
tensor_C.resize({batch_count * problem_size.row(), 1});
tensor_D.resize({batch_count * problem_size.row(), 1});
reference_D.resize({batch_count * problem_size.row(), 1}, false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 1));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 3));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Gemv::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Gemv::ElementB(1);
tensor_C.host_view().at({0, 0}) = typename Gemv::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
}
/// Compares the device result against the host-computed reference and writes diagnostics to a file if they differ
bool compare_reference(
cutlass::MatrixCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
EXPECT_TRUE(passed) << " mismatched reference";
if (!passed) {
std::ofstream file("testbed_universal_errors.txt");
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
/// Verifies the result
bool verify(
cutlass::MatrixCoord problem_size,
int32_t batch_count,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
cutlass::reference::host::GemmComplex<
typename Gemv::ElementA, typename Gemv::LayoutA,
typename Gemv::ElementB, LayoutV,
typename Gemv::ElementC, LayoutV,
ElementCompute, ElementAccumulator
>(
{problem_size.row(), 1, problem_size.column()},
alpha,
tensor_A.host_ref(),
Gemv::kTransformA,
tensor_B.host_ref(),
Gemv::kTransformB,
beta,
tensor_C.host_ref(),
reference_D.host_ref(),
ElementAccumulator(0),
batch_count,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
);
return compare_reference(problem_size, alpha, beta);
}
/// Runs one problem size
bool run(
cutlass::MatrixCoord problem_size,
int32_t batch_count,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
ElementCompute alpha,
ElementCompute beta) {
this->initialize(problem_size, batch_count);
//
// Initialize the GEMV operator
//
typename Gemv::Arguments arguments{
problem_size,
batch_count,
{alpha, beta},
tensor_A.device_ref(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_D.device_data(),
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
};
Gemv gemm_op;
cutlass::Status status = gemm_op.can_implement(arguments);
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
size_t workspace_size = Gemv::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the GEMV
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(
problem_size,
batch_count,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D,
alpha,
beta);
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemv>
bool TestAllGemv() {
using ElementCompute = typename Gemv::EpilogueOutputOp::ElementCompute;
int Batch[] = {
1, 520, 1314
};
int M[] = {
1, 5, 16
};
int K[] = {
8, 128, 256
};
double Alpha[] = {
1, 1.25
};
double Beta[] = {
0, 1, 1.25
};
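// The batch strides passed to run() below assume packed operands:
// A advances m * k elements per batch, B advances k, and C/D advance m.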
for (int b : Batch) {
for (int m : M) {
for (int k : K) {
for (double alpha : Alpha) {
for (double beta : Beta) {
TestbedGemv<Gemv> testbed;
if (!testbed.run(
{m, k},
b,
m * k,
k,
m,
m,
ElementCompute(alpha),
ElementCompute(beta))) {
return false;
}
}
}
}
}
}
return true;
}
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f16n_f16_f16_simt_f32, RowMajorA) {
using ElementInput = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementAccumulator = float;
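// Element access granularity: 8 half_t elements per 128-bit access
// (the f32 and f64 tests below use 4 and 2 elements for the same 128-bit width).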
int const kElementsPerAccess = 8;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element accumulator
EpilogueOp, // Output operator
kElementsPerAccess // Element access granularity
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f32n_f32_f32_simt_f32, RowMajorA) {
using ElementInput = float;
using ElementOutput = float;
using LayoutA = cutlass::layout::RowMajor;
using ElementAccumulator = float;
int const kElementsPerAccess = 4;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element accumulator
EpilogueOp, // Output operator
kElementsPerAccess // Element access granularity
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f64n_f64_f64_simt_f64, RowMajorA) {
using ElementInput = double;
using ElementOutput = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementAccumulator = double;
int const kElementsPerAccess = 2;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element accumulator
EpilogueOp, // Output operator
kElementsPerAccess // Element access granularity
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f16n_f16_f16_simt_f32, ColumnMajorA) {
using ElementInput = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementAccumulator = float;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element Accumulator
EpilogueOp // Output operator
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f32n_f32_f32_simt_f32, ColumnMajorA) {
using ElementInput = float;
using ElementOutput = float;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementAccumulator = float;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element Accumulator
EpilogueOp // Output operator
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f64n_f64_f64_simt_f64, ColumnMajorA) {
using ElementInput = double;
using ElementOutput = double;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementAccumulator = double;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element Accumulator
EpilogueOp // Output operator
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/gemv.cu/0 | {
"file_path": "test/unit/gemm/device/gemv.cu",
"repo_id": "test",
"token_count": 7013
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/gemm_planar_complex.h"
#include "cutlass/util/host_tensor_planar_complex.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace device {
////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
class TestbedPlanarComplex {
public:
using ElementA = typename Gemm::ElementA;
using LayoutA = typename Gemm::LayoutA;
using ElementB = typename Gemm::ElementB;
using LayoutB = typename Gemm::LayoutB;
using ElementC = typename Gemm::ElementC;
using LayoutC = typename Gemm::LayoutC;
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
using ElementAccumulator = typename Gemm::ElementAccumulator;
//
// Data members
//
cutlass::gemm::GemmCoord problem_size;
cutlass::HostTensorPlanarComplex<ElementA, LayoutA> tensor_A;
cutlass::HostTensorPlanarComplex<ElementB, LayoutB> tensor_B;
cutlass::HostTensorPlanarComplex<ElementC, LayoutC> tensor_C;
cutlass::HostTensorPlanarComplex<ElementC, LayoutC> tensor_D;
cutlass::HostTensorPlanarComplex<ElementC, LayoutC> tensor_D_ref;
//
// Methods
//
TestbedPlanarComplex(cutlass::gemm::GemmCoord const & problem_size): problem_size(problem_size) {
tensor_A.reset({problem_size.m(), problem_size.k()});
tensor_B.reset({problem_size.k(), problem_size.n()});
tensor_C.reset({problem_size.m(), problem_size.n()});
tensor_D.reset({problem_size.m(), problem_size.n()});
tensor_D_ref.reset({problem_size.m(), problem_size.n()}, false);
}
void initialize() {
uint64_t seed = 1073;
int scope_max = 8;
int scope_min = -8;
cutlass::reference::host::TensorFillRandomUniform(
tensor_A.host_view(), seed, scope_max, scope_min, 0);
cutlass::reference::host::TensorFillRandomUniform(
tensor_B.host_view(), seed * 2019, scope_max, scope_min, 0);
cutlass::reference::host::TensorFillRandomUniform(
tensor_C.host_view(), seed * 2020, scope_max, scope_min, 0);
cutlass::reference::host::TensorFill(tensor_D.host_view(), cutlass::complex<ElementC>());
cutlass::reference::host::TensorFill(tensor_D_ref.host_view(), cutlass::complex<ElementC>());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
bool run(
cutlass::complex<ElementCompute> alpha = {1, 0},
cutlass::complex<ElementCompute> beta = {0, 0}) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
initialize();
int batch_count = 1;
ElementA *ptr_A = tensor_A.device_data();
ElementB *ptr_B = tensor_B.device_data();
ElementC *ptr_C = tensor_C.device_data();
ElementC *ptr_D = tensor_D.device_data();
typename LayoutA::Stride::Index lda = tensor_A.layout().stride(0);
typename LayoutB::Stride::Index ldb = tensor_B.layout().stride(0);
typename LayoutC::Stride::Index ldc = tensor_C.layout().stride(0);
typename LayoutC::Stride::Index ldd = tensor_D.layout().stride(0);
int64_t imag_stride_A = tensor_A.imaginary_stride();
int64_t imag_stride_B = tensor_B.imaginary_stride();
int64_t imag_stride_C = tensor_C.imaginary_stride();
int64_t imag_stride_D = tensor_D.imaginary_stride();
//
// Launch device kernel
//
Gemm gemm_op;
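// Planar complex argument list: each operand passes a pointer to its real plane followed by
// a pointer to its imaginary plane (base pointer + imaginary stride), and each leading
// dimension appears twice, once for the real plane and once for the imaginary plane
// (identical here because both planes share the same layout).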
typename Gemm::Arguments args{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size,
batch_count,
{alpha, beta},
ptr_A,
ptr_A + imag_stride_A,
ptr_B,
ptr_B + imag_stride_B,
ptr_C,
ptr_C + imag_stride_C,
ptr_D,
ptr_D + imag_stride_D,
lda,
lda,
ldb,
ldb,
ldc,
ldc,
ldd,
ldd
};
cutlass::Status status = gemm_op(args);
EXPECT_EQ(status, cutlass::Status::kSuccess);
cudaError_t error = cudaDeviceSynchronize();
EXPECT_EQ(error, cudaSuccess) << "cudaDeviceSynchronize() failed: " << cudaGetErrorString(error);
tensor_D.sync_host();
//
// Compute reference
//
cutlass::reference::host::GemmPlanarComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator
>(
problem_size,
alpha,
tensor_A.host_ref(),
Gemm::kTransformA,
tensor_B.host_ref(),
Gemm::kTransformB,
beta,
tensor_C.host_ref(),
tensor_D_ref.host_ref()
);
bool passed = cutlass::reference::host::TensorEquals(
tensor_D.host_view(),
tensor_D_ref.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("gemm_planar_complex.txt");
output
<< "A:\n" << tensor_A.host_view() << "\n"
<< "B:\n" << tensor_B.host_view() << "\n"
<< "C:\n" << tensor_C.host_view() << "\n"
<< "Reference:\n"
<< tensor_D_ref.host_view() << "\n"
<< "Computed:\n"
<< tensor_D.host_view() << "\n";
}
return passed;
}
};
template <typename Gemm>
bool TestOneGemmPlanarComplex(cutlass::gemm::GemmCoord problem_size) {
TestbedPlanarComplex<Gemm> testbed(problem_size);
return testbed.run();
}
template <typename Gemm>
bool TestAllGemmPlanarComplex() {
int M[] = {
16, 64, 72, 144, 264, 520,
};
int N[] = {
16, 64, 72, 144, 248, 264, 520
};
int K[] = {
8, 64, 72, 96, 264, 520
};
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
cutlass::complex<ElementCompute> alpha_values[] = {
{ElementCompute(1.25), ElementCompute(-0.5)}
};
cutlass::complex<ElementCompute> beta_values[] = {
{ElementCompute(-2.25), ElementCompute(1.5)}
};
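// Complex-valued alpha and beta exercise the cross terms that mix the real and imaginary planes.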
for (int m : M) {
for (int n : N) {
for (int k : K) {
test::gemm::device::TestbedPlanarComplex<Gemm> testbed({m, n, k});
for (auto const &alpha : alpha_values) {
for (auto const &beta : beta_values) {
bool passed = testbed.run(alpha, beta);
if (!passed) {
return false;
}
}
}
}
}
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_planar_complex.h/0 | {
"file_path": "test/unit/gemm/device/testbed_planar_complex.h",
"repo_id": "test",
"token_count": 3596
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief unit tests for matrix layout
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace layout {
void test_row_major_layout(int row_size, int column_size, int ldm) {
cutlass::layout::RowMajor row_major(ldm);
// test pointer offset
for (int row_idx = 0; row_idx < row_size; row_idx++) {
for (int column_idx = 0; column_idx < column_size; column_idx++) {
cutlass::MatrixCoord matrix_coord(row_idx, column_idx);
auto ptr_offset = row_major(matrix_coord);
decltype(ptr_offset) reference_offset = row_idx * ldm + column_idx;
EXPECT_EQ(ptr_offset, reference_offset);
}
}
// test stride
EXPECT_EQ(row_major.stride()[0], ldm);
// test capacity
auto capacity = row_major.capacity(cutlass::MatrixCoord(row_size, column_size));
decltype(capacity) reference_capacity = row_size * ldm;
EXPECT_EQ(capacity, reference_capacity);
// test packed
auto packed = row_major.packed(cutlass::MatrixCoord(row_size, column_size));
// the packed matrix's stride equals the column size
EXPECT_EQ(packed.stride()[0], column_size);
}
void test_column_major_layout(int row_size, int column_size, int ldm) {
cutlass::layout::ColumnMajor column_major(ldm);
// test pointer offset
for (int row_idx = 0; row_idx < row_size; row_idx++) {
for (int column_idx = 0; column_idx < column_size; column_idx++) {
cutlass::MatrixCoord matrix_coord(row_idx, column_idx);
auto ptr_offset = column_major(matrix_coord);
decltype(ptr_offset) reference_offset = row_idx + column_idx * ldm;
EXPECT_EQ(ptr_offset, reference_offset);
}
}
// test stride
EXPECT_EQ(column_major.stride()[0], ldm);
// test capacity
auto capacity = column_major.capacity(cutlass::MatrixCoord(row_size, column_size));
decltype(capacity) reference_capacity = column_size * ldm;
EXPECT_EQ(capacity, reference_capacity);
// test packed
auto packed = column_major.packed(cutlass::MatrixCoord(row_size, column_size));
// the packed matrix's stride equals the row size
EXPECT_EQ(packed.stride()[0], row_size);
}
} // namespace layout
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Layout_Matrix, row_major_32_53) {
int const row_size = 32;
int const column_size = 53;
int const ldm = 55;
test::layout::test_row_major_layout(row_size, column_size, ldm);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Layout_Matrix, column_major_32_53) {
int const row_size = 32;
int const column_size = 53;
int const ldm = 55;
test::layout::test_column_major_layout(row_size, column_size, ldm);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Layout_Matrix, general_matrix) {
int M = 16;
int N = 16;
int interleave = 4;
cutlass::layout::GeneralMatrix::TensorCoord extent = {M, N};
cutlass::layout::GeneralMatrix layout =
cutlass::layout::GeneralMatrix::packed(
extent, cutlass::layout::Matrix::kColumnMajor, interleave);
cutlass::HostTensor<int, cutlass::layout::ColumnMajor> tensor(extent);
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
tensor.host_data(m * N + n) = m * N + n;
}
}
cutlass::TensorView<int, cutlass::layout::GeneralMatrix> canonical({tensor.host_data(), layout}, extent);
// Uncomment this to view
//
//std::cout << canonical << std::endl;
//
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/layout/matrix.cu/0 | {
"file_path": "test/unit/layout/matrix.cu",
"repo_id": "test",
"token_count": 1830
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit test for the PipelineAsync class
*/
#define KERNEL_DBG_TRACE false
#include "../common/cutlass_unit_test.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/cluster_sm90.hpp>
#include <cutlass/util/reference/host/gemm.h>
#include <cutlass/cluster_launch.hpp>
#include "cutlass/core_io.h"
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "testbed.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/arch/barrier.h"
#include "cute/arch/cluster_sm90.hpp"
using namespace cute;
//////////////////// KERNEL /////////////////////////
template <uint32_t Stages>
struct SharedStorage
{
typename cutlass::PipelineAsync<Stages>::SharedStorage storage;
};
// The goal of this kernel is to complete without deadlocking.
// Simple scenario: two producer warps and one consumer warp.
template <class ClusterShape, uint32_t NumStages>
__global__ static
void pipeline_async_basic_device(uint32_t const num_iterations)
{
extern __shared__ char shared_memory[];
using MainloopPipeline = typename cutlass::PipelineAsync<NumStages>;
using PipelineState = typename cutlass::PipelineState<NumStages>;
using SharedStorage = SharedStorage<NumStages>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_predicate = cute::elect_one_sync();
dim3 block_id_in_cluster = cute::block_id_in_cluster();
// This example uses two producer warps and one consumer warp
typename MainloopPipeline::Params params;
params.producer_arv_count = 2;
params.consumer_arv_count = 1;
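// Two producer warps each contribute one arrival (their elected lane) per stage, hence
// producer_arv_count = 2; a single consumer thread releases each stage, hence consumer_arv_count = 1.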
MainloopPipeline pipeline(shared_storage.storage, params);
// Ensure all CTAs in the cluster have completed pipeline init before issuing commits
cute::cluster_arrive_relaxed();
cute::cluster_wait();
__syncthreads();
if (lane_predicate) {
// Producer Warps
if (warp_idx==0 || warp_idx==1) {
PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>();
int prologue_iterations = min(NumStages, num_iterations);
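// Prologue: every stage starts empty, so the first min(NumStages, num_iterations)
// commits can be issued without a matching producer_acquire.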
for ( int i = 0; i < prologue_iterations; ++i) {
// Can also specify stage to commit directly
pipeline.producer_commit(smem_pipe_write);
++smem_pipe_write;
}
int mainloop_iterations = num_iterations - prologue_iterations;
for ( ; mainloop_iterations > 0; --mainloop_iterations) {
pipeline.producer_acquire(smem_pipe_write);
pipeline.producer_commit(smem_pipe_write);
++smem_pipe_write;
}
}
else {
PipelineState smem_pipe_read;
for (int iter=0 ; iter < num_iterations; ++iter) {
pipeline.consumer_wait(smem_pipe_read);
pipeline.consumer_release(smem_pipe_read);
++smem_pipe_read;
}
}
}
// To make sure remote SMEM doesn't get destroyed
cute::cluster_arrive();
cute::cluster_wait();
}
/////////////////////////////////////////////////////
template<uint32_t Stages_, typename ClusterShape_>
struct PipelineTest {
//
// Data members
//
static constexpr uint32_t Stages = Stages_;
static constexpr uint32_t kBlockSize = 96;
using ClusterShape = ClusterShape_;
//
// Methods
//
// Ctor
PipelineTest() = default;
// Run the pipeline test kernel
cudaError_t run(uint32_t const kNumIters,
cudaStream_t stream = nullptr) {
// Pipeline (multistage pipeline)
auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{};
//
// Configure and launch
//
int iterations = 2;
cudaError_t result;
for (int iter = 0; iter < iterations; ++iter) {
// Define the pipeline type and its shared memory footprint
using MainloopPipeline = typename cutlass::PipelineAsync<Stages>;
int smem_size = int(sizeof(SharedStorage<Stages>));
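// Allow the kernel to use the requested amount of dynamic shared memory
// (required whenever it exceeds the default per-block limit).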
result = cudaFuncSetAttribute(
pipeline_async_basic_device<decltype(cluster_shape), Stages>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
// Launch a single cluster, with kBlockSize (96) threads per CTA
dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimBlock(kBlockSize,1,1);
const void* kernel = (const void*)pipeline_async_basic_device<decltype(cluster_shape), Stages>;
int iters = kNumIters;
void* kernel_params[] = {reinterpret_cast<void*>(&iters)};
cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params);
} // profiling loop ends
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl;
return result;
}
return cudaSuccess;
}
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_Verify_PipelineAsync, Cluster1x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster1x1_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster1x1_Stage10) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 10;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster2x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster2x2_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster2x2_Stage10) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 10;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster1x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster1x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster1x2_Stage10) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 10;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster2x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster2x1_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x1_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster1x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster1x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster2x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster2x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage3) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 3;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage4) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 4;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage6) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 6;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage8) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 8;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage9) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 9;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage10) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 10;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineAsync, Cluster4x4_Stage11) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 11;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
#endif
| test/unit/pipeline/pipeline_async.cu/0 | {
"file_path": "test/unit/pipeline/pipeline_async.cu",
"repo_id": "test",
"token_count": 5561
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief CUTLASS Library is an object-oriented approach to managing operations implemented by CUTLASS.
Generally,
description - compile-time constant parameters used to instantiate an operation
configuration - runtime parameters with computationally expensive initialization
arguments - runtime parameters that may be passed to an initialized operation with low
computational overhead
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <vector>
#include <string>
#include <cstdint>
#include <stdexcept>
#include <cuda_runtime.h>
#include "cutlass/cutlass.h"
#include "cutlass/library/types.h"
#include "cutlass/library/descriptions.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/blas3.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mode of Universal GEMM
using GemmUniversalMode = cutlass::gemm::GemmUniversalMode;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Base class for all operations
class Operation {
public:
virtual ~Operation() { }
virtual OperationDescription const & description() const = 0;
virtual Status can_implement(
void const *configuration,
void const *arguments) const = 0;
virtual uint64_t get_host_workspace_size(
void const *configuration) const = 0;
virtual uint64_t get_device_workspace_size(
void const *configuration,
void const *arguments = nullptr) const = 0;
virtual Status initialize(
void const *configuration,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const = 0;
virtual Status run(
void const *arguments,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const = 0;
};
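/// Hypothetical usage sketch (editor-added illustration, not part of the library API): shows one
/// plausible call sequence against the abstract Operation interface. How the concrete operation
/// pointer and the configuration/argument blobs are obtained (for example, from the operation
/// table built by the library manifest) is outside the scope of this sketch.
inline Status run_operation_example(
  Operation const *operation,
  void const *configuration,
  void const *arguments,
  cudaStream_t stream = nullptr) {

  // 1. Ask whether the operation supports this configuration/argument combination.
  Status status = operation->can_implement(configuration, arguments);
  if (status != Status::kSuccess) {
    return status;
  }

  // 2. Query and allocate the host- and device-side workspaces.
  uint64_t host_bytes = operation->get_host_workspace_size(configuration);
  uint64_t device_bytes = operation->get_device_workspace_size(configuration, arguments);

  std::vector<uint8_t> host_workspace(host_bytes);
  void *device_workspace = nullptr;
  if (device_bytes && cudaMalloc(&device_workspace, device_bytes) != cudaSuccess) {
    return Status::kErrorInternal;
  }

  // 3. Initialize once (potentially expensive), then run (low overhead, may be repeated
  //    with new arguments).
  status = operation->initialize(configuration, host_workspace.data(), device_workspace, stream);
  if (status == Status::kSuccess) {
    status = operation->run(arguments, host_workspace.data(), device_workspace, stream);
  }

  if (device_workspace) {
    cudaFree(device_workspace);
  }
  return status;
}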
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Configuration for basic GEMM operations
//
// OperationKind: Gemm
// GemmKind: Gemm
//
struct GemmConfiguration {
/// GEMM problem size
gemm::GemmCoord problem_size{};
/// Leading dimension of A matrix
int64_t lda{0};
/// Leading dimension of B matrix
int64_t ldb{0};
/// Leading dimension of C matrix
int64_t ldc{0};
/// Leading dimension of D matrix
int64_t ldd{0};
/// Number of partitions of K dimension
int split_k_slices{0};
};
/// Arguments for GEMM
struct GemmArguments {
/// Pointer to A matrix
void const *A{nullptr};
/// Pointer to B matrix
void const *B{nullptr};
/// Pointer to C matrix
void const *C{nullptr};
/// Pointer to D matrix
void *D{nullptr};
/// Host or device pointer to alpha scalar
void const *alpha{nullptr};
/// Host or device pointer to beta scalar
void const *beta{nullptr};
/// Enumerant indicating whether alpha/beta point to host or device memory
ScalarPointerMode pointer_mode{};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Configuration for batched GEMM in which multiple matrix products are computed
//
// OperationKind: Gemm
// GemmKind: Batched
struct GemmBatchedConfiguration {
/// GEMM problem size
gemm::GemmCoord problem_size{};
/// Leading dimension of A matrix
int64_t lda{0};
/// Leading dimension of B matrix
int64_t ldb{0};
/// Leading dimension of C matrix
int64_t ldc{0};
/// Leading dimension of D matrix
int64_t ldd{0};
/// Stride between instances of the A matrix in memory
int64_t batch_stride_A{0};
/// Stride between instances of the B matrix in memory
int64_t batch_stride_B{0};
/// Stride between instances of the C matrix in memory
int64_t batch_stride_C{0};
/// Stride between instances of the D matrix in memory
int64_t batch_stride_D{0};
/// Number of GEMMs in batch
int batch_count{1};
};
/// Arguments to batched GEMM
using GemmBatchedArguments = GemmArguments;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Configuration for batched GEMM in which multiple matrix products are computed
//
// OperationKind: Gemm
// GemmKind: Array
struct GemmArrayConfiguration {
gemm::GemmCoord problem_size{};
/// Leading dimension of A matrix
int64_t lda{0};
/// Leading dimension of B matrix
int64_t ldb{0};
/// Leading dimension of C matrix
int64_t ldc{0};
/// Leading dimension of D matrix
int64_t ldd{0};
int batch_count{1};
};
/// Arguments for GEMM - used by all the GEMM operations
struct GemmArrayArguments {
void const * const *A{nullptr};
void const * const *B{nullptr};
void const * const *C{nullptr};
void * const *D{nullptr};
void const *alpha{nullptr};
void const *beta{nullptr};
ScalarPointerMode pointer_mode{};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Universal GEMM supporting multiple split-K modes, multiple batched modes, real and complex
//
// OperationKind: Gemm
// GemmKind: Universal
struct GemmUniversalConfiguration {
GemmUniversalMode mode{GemmUniversalMode::kGemm};
gemm::GemmCoord problem_size{};
int batch_count{1};
int64_t lda{0};
int64_t ldb{0};
int64_t ldc{0};
int64_t ldd{0};
};
struct GemmUniversalArguments {
// NOTE: these are replicated for 3.0 interfaces
gemm::GemmCoord problem_size{};
int batch_count{1};
void const *A{nullptr};
void const *B{nullptr};
void const *C{nullptr};
void *D{nullptr};
void const *alpha{nullptr};
void const *beta{nullptr};
ScalarPointerMode pointer_mode{};
// NOTE: these are replicated for 3.0 interfaces
int64_t lda{0};
int64_t ldb{0};
int64_t ldc{0};
int64_t ldd{0};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
// Needed for some 3.x kernels
int sm_count{0};
library::RasterOrder raster_order{};
};
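/// Hypothetical illustration (editor-added, not part of the library): one plausible way to
/// populate the universal GEMM configuration and arguments for a single, non-batched problem.
/// The pointers A/B/C/D, the leading dimensions, and the host-resident alpha/beta scalars are
/// placeholders assumed to be supplied by the caller.
inline void fill_gemm_universal_example(
  GemmUniversalConfiguration &config,
  GemmUniversalArguments &args,
  int m, int n, int k,
  void const *A, int64_t lda,
  void const *B, int64_t ldb,
  void const *C, int64_t ldc,
  void *D, int64_t ldd,
  float const *alpha, float const *beta) {

  config.mode = GemmUniversalMode::kGemm;
  config.problem_size = gemm::GemmCoord(m, n, k);
  config.batch_count = 1;
  config.lda = lda;
  config.ldb = ldb;
  config.ldc = ldc;
  config.ldd = ldd;

  // The 3.x-style interfaces read the problem size and strides from the arguments as well,
  // so the values are replicated here. Batch strides keep their zero defaults since this
  // sketch describes a non-batched problem.
  args.problem_size = config.problem_size;
  args.batch_count = config.batch_count;
  args.A = A;
  args.B = B;
  args.C = C;
  args.D = D;
  args.lda = lda;
  args.ldb = ldb;
  args.ldc = ldc;
  args.ldd = ldd;
  args.alpha = alpha;
  args.beta = beta;
  args.pointer_mode = ScalarPointerMode::kHost;  // alpha/beta reside in host memory
}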
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Complex valued GEMM in which real and imaginary parts are separated by a stride
//
// OperationKind: Gemm
// GemmKind: Planar complex
struct GemmPlanarComplexConfiguration {
GemmUniversalMode mode{GemmUniversalMode::kGemm};
gemm::GemmCoord problem_size{};
int batch_count{1};
int64_t lda_real{0};
int64_t lda_imag{0};
int64_t ldb_real{0};
int64_t ldb_imag{0};
int64_t ldc_real{0};
int64_t ldc_imag{0};
int64_t ldd_real{0};
int64_t ldd_imag{0};
};
/// Arguments for planar complex GEMMs
struct GemmPlanarComplexArguments {
void const *A_real{nullptr};
void const *A_imag{nullptr};
void const *B_real{nullptr};
void const *B_imag{nullptr};
void const *C_real{nullptr};
void const *C_imag{nullptr};
void *D_real{nullptr};
void *D_imag{nullptr};
void const *alpha{nullptr};
void const *beta{nullptr};
ScalarPointerMode pointer_mode{};
int64_t batch_stride_A_real{0};
int64_t batch_stride_A_imag{0};
int64_t batch_stride_B_real{0};
int64_t batch_stride_B_imag{0};
int64_t batch_stride_C_real{0};
int64_t batch_stride_C_imag{0};
int64_t batch_stride_D_real{0};
int64_t batch_stride_D_imag{0};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This is a special form of planar complex which loads pointers and problem size
/// from memory.
struct GemmPlanarComplexArrayConfiguration {
gemm::GemmCoord problem_size{};
int batch_count{1};
int64_t lda_real{0};
int64_t lda_imag{0};
int64_t ldb_real{0};
int64_t ldb_imag{0};
int64_t ldc_real{0};
int64_t ldc_imag{0};
int64_t ldd_real{0};
int64_t ldd_imag{0};
};
/// Arguments for planar complex GEMMs
struct GemmPlanarComplexArrayArguments {
int const *M{nullptr};
int const *N{nullptr};
int const *K{nullptr};
void const * const * A_real{nullptr};
void const * const * A_imag{nullptr};
void const * const * B_real{nullptr};
void const * const * B_imag{nullptr};
void const * const * C_real{nullptr};
void const * const * C_imag{nullptr};
void * const * D_real{nullptr};
void * const * D_imag{nullptr};
void const * alpha{nullptr};
void const * beta{nullptr};
ScalarPointerMode pointer_mode{};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Grouped GEMM supporting an array of independent GEMM problems executed in a single kernel launch
//
// OperationKind: Gemm
// GemmKind: Grouped
struct GemmGroupedConfiguration {
int problem_count{0};
int threadblock_count{0};
};
struct GemmGroupedArguments {
gemm::GemmCoord *problem_sizes{nullptr};
void * ptr_A{nullptr};
void * ptr_B{nullptr};
void * ptr_C{nullptr};
void * ptr_D{nullptr};
int64_t *lda{nullptr};
int64_t *ldb{nullptr};
int64_t *ldc{nullptr};
int64_t *ldd{nullptr};
void const *alpha{nullptr};
void const *beta{nullptr};
ScalarPointerMode pointer_mode{};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// OperationKind: kSparseGemm
//
/// Computes GEMM assuming one of the inputs has 2:4 structured sparsity.
struct SparseGemmConfiguration {
GemmUniversalMode mode{GemmUniversalMode::kGemm};
gemm::GemmCoord problem_size{};
int batch_count{1}; /// number of sparse matrix products in batch
int64_t lda{0}; /// leading dimension of A operand
int64_t ldb{0}; /// leading dimension of B operand
int64_t ldc{0}; /// leading dimension of C operand
int64_t ldd{0}; /// leading dimension of D operand
int64_t lde{0}; /// leading dimension of E operand (metadata matrix)
int64_t batch_stride_A{0}; // stride between matrices
int64_t batch_stride_B{0}; // stride between matrices
int64_t batch_stride_C{0}; // stride between matrices
int64_t batch_stride_D{0}; // stride between matrices
int64_t batch_stride_E{0}; // stride between matrices
};
/// Arguments for sparse GEMMs
struct SparseGemmArguments {
void const *A{nullptr}; /// pointer to A matrix
void const *B{nullptr}; /// pointer to B matrix
void const *C{nullptr}; /// pointer to C matrix
void *D{nullptr}; /// pointer to D matrix
void const *E{nullptr}; /// pointer to E matrix (metadata)
void const *alpha{nullptr}; /// pointer to alpha scalar
void const *beta{nullptr}; /// pointer to beta scalar
ScalarPointerMode pointer_mode{}; /// enumerant indicating whether alpha/beta pointers are host
/// or device pointers.
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Configuration for basic Rank K update operations
//
// OperationKind: (Syrk, Herk, Syr2k, Her2k)
// RankKKind: Universal
//
struct RankKConfiguration {
/// SYRK problem size
gemm::GemmCoord problem_size{};
/// Leading dimension of A matrix
int64_t lda{0};
/// Leading dimension of B matrix
int64_t ldb{0};
/// Leading dimension of C matrix
int64_t ldc{0};
/// Leading dimension of D matrix
int64_t ldd{0};
/// Batch Count
int batch_count{1};
};
/// Arguments for (Syrk, Herk, Syr2k, Her2k)
struct RankKArguments {
/// Pointer to A matrix
void const *A{nullptr};
/// Pointer to B matrix (used only for Syr2k and Her2k)
void const *B{nullptr};
/// Pointer to C matrix
void const *C{nullptr};
/// Pointer to D matrix
void *D{nullptr};
/// Host or device pointer to alpha scalar
void const *alpha{nullptr};
/// Host or device pointer to beta scalar
void const *beta{nullptr};
/// Enumerant indicating whether alpha/beta point to host or device memory
ScalarPointerMode pointer_mode{};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Configuration for basic TRMM operations
//
// OperationKind: Trmm
// TrmmKind: Universal
//
struct TrmmConfiguration {
/// TRMM problem size
gemm::GemmCoord problem_size{};
/// Leading dimension of A matrix
int64_t lda{0};
/// Leading dimension of B matrix
int64_t ldb{0};
/// Leading dimension of D matrix
int64_t ldd{0};
/// Batch Count
int batch_count{1};
};
/// Arguments for TRMM
struct TrmmArguments {
/// Pointer to A matrix
void const *A{nullptr};
/// Pointer to B matrix
void const *B{nullptr};
/// Pointer to D matrix
void *D{nullptr};
/// Host or device pointer to alpha scalar
void const *alpha{nullptr};
/// Host or device pointer to beta scalar
void const *beta{nullptr};
/// Enumerant indicating whether alpha/beta point to host or device memory
ScalarPointerMode pointer_mode{};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_D{0};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Configuration for basic SYMM/HEMM update operations
//
// OperationKind: (Symm, Hemm)
// SymmKind: Universal
//
struct SymmConfiguration {
/// SYMM/HEMM problem size
gemm::GemmCoord problem_size{};
/// Leading dimension of A matrix
int64_t lda{0};
/// Leading dimension of B matrix
int64_t ldb{0};
/// Leading dimension of C matrix
int64_t ldc{0};
/// Leading dimension of D matrix
int64_t ldd{0};
/// Batch Count
int batch_count{1};
};
/// Arguments for (Symm, Hemm)
struct SymmArguments {
/// Pointer to A matrix
void const *A{nullptr};
/// Pointer to B matrix
void const *B{nullptr};
/// Pointer to C matrix
void const *C{nullptr};
/// Pointer to D matrix
void *D{nullptr};
/// Host or device pointer to alpha scalar
void const *alpha{nullptr};
/// Host or device pointer to beta scalar
void const *beta{nullptr};
/// Enumerant indicating whether alpha/beta point to host or device memory
ScalarPointerMode pointer_mode{};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Two dimensional convolution
//
// OperationKind: Conv2d
//
struct Conv2dConfiguration {
conv::SplitKMode split_k_mode;
/// Conv2d problem size
// contains strictly conv2d size (N,H,W,C,K,R,S,P,Q,padding,stride,dilation,mode)
// also includes (split_k_slices, groups)
conv::Conv2dProblemSize problem_size{};
// stride of operand A
std::vector<int64_t> stride_a{};
// stride of operand B
std::vector<int64_t> stride_b{};
// stride of operand C
std::vector<int64_t> stride_c{};
};
/// Three dimensional convolution
//
// OperationKind: Conv3d
//
struct Conv3dConfiguration {
conv::SplitKMode split_k_mode{};
  /// Conv3d problem size
  // contains strictly conv3d size (N,D,H,W,C,K,T,R,S,Z,P,Q,padding,stride,dilation,mode)
// also includes (split_k_slices, groups)
conv::Conv3dProblemSize problem_size{};
/// Layout object for activations tensor
layout::TensorNDHWC layout_activations{};
/// Layout object for filters tensor
layout::TensorNDHWC layout_filters{};
/// Layout object for source tensor
layout::TensorNDHWC layout_source{};
/// Layout object for output tensor
layout::TensorNDHWC layout_output{};
//
// Methods
//
// Mapping functions (A,B,C -> activation,filter,output)
layout::TensorNDHWC layout_a(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return layout_activations;
case library::ConvKind::kDgrad: return layout_output;
case library::ConvKind::kWgrad: return layout_output;
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
layout::TensorNDHWC layout_b(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return layout_filters;
case library::ConvKind::kDgrad: return layout_filters;
case library::ConvKind::kWgrad: return layout_activations;
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
layout::TensorNDHWC layout_c(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return layout_output;
case library::ConvKind::kDgrad: return layout_activations;
case library::ConvKind::kWgrad: return layout_filters;
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
};
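/// Hypothetical illustration (editor-added): the layout_a/layout_b/layout_c helpers above select
/// which tensor layout backs each implicit-GEMM operand for a given convolution kind. The
/// configuration object is assumed to have been populated by the caller; the variables below are
/// placeholders a real caller would hand to an operation.
inline void select_conv3d_operand_layouts_example(
  Conv3dConfiguration const &config,
  library::ConvKind conv_kind) {

  // kFprop: A <- activations, B <- filters,     C <- output
  // kDgrad: A <- output,      B <- filters,     C <- activations
  // kWgrad: A <- output,      B <- activations, C <- filters
  layout::TensorNDHWC layout_A = config.layout_a(conv_kind);
  layout::TensorNDHWC layout_B = config.layout_b(conv_kind);
  layout::TensorNDHWC layout_C = config.layout_c(conv_kind);

  (void)layout_A;
  (void)layout_B;
  (void)layout_C;
}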
/// Arguments for CONV
struct ConvArguments {
/////////////////////////////////////////////////////////
/// ImplicitGemm matrices A, B, C, D
/////////////////////////////////////////////////////////
/// pointer to implicit gemm matrix A
void const *A{nullptr};
/// pointer to implicit gemm matrix B
void const *B{nullptr};
/// pointer to reordered matrix B
void const *reordered_B{nullptr};
/// pointer to implicit gemm matrix C
void const *C{nullptr};
/// pointer to implicit gemm destination matrix D
void *D{nullptr};
/// Host or device pointer to alpha scalar
void const *alpha{nullptr};
/// Host or device pointer to beta scalar
void const *beta{nullptr};
/// Enumerant indicating whether alpha/beta point to host or device memory
ScalarPointerMode pointer_mode{};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Configuration for Reduction operations
//
// OperationKind: Reduction
//
struct ReductionConfiguration {
/// Reduction problem size
MatrixCoord problem_size{};
/// Number of partitions to reduce
int partitions{0};
/// Number of elements between each partition
int64_t partition_stride{0};
/// leading dimension of 'w'orkspace operand
int64_t ldw{0};
/// leading dimension of 's'ource operand
int64_t lds{0};
/// leading dimension of 'd'estination operand
int64_t ldd{0};
};
/// Arguments for Reduction
struct ReductionArguments {
/// Pointer to workspace matrix
void const *workspace{nullptr};
/// Pointer to source matrix
void const *source{nullptr};
/// Pointer to destination matrix
void *destination{nullptr};
/// pointer to reference matrix
void *reference{nullptr};
/// Host or device pointer to alpha scalar
void const *alpha{nullptr};
/// Host or device pointer to beta scalar
void const *beta{nullptr};
/// Enumerant indicating whether alpha/beta point to host or device memory
ScalarPointerMode pointer_mode{};
};
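/// Hypothetical illustration (editor-added): populating the reduction configuration and arguments
/// for combining `partitions` split-K partial results into one output. All pointers and leading
/// dimensions are placeholders supplied by the caller; the partition stride below assumes the
/// partial results are packed back-to-back in the workspace with row stride `ldw`.
inline void fill_reduction_example(
  ReductionConfiguration &config,
  ReductionArguments &args,
  int m, int n, int partitions,
  void const *workspace, int64_t ldw,
  void const *source, int64_t lds,
  void *destination, int64_t ldd,
  float const *alpha, float const *beta) {

  config.problem_size = MatrixCoord(m, n);
  config.partitions = partitions;
  config.partition_stride = int64_t(m) * ldw;  // assumption: packed, row-major partitions
  config.ldw = ldw;
  config.lds = lds;
  config.ldd = ldd;

  args.workspace = workspace;
  args.source = source;
  args.destination = destination;
  args.reference = nullptr;
  args.alpha = alpha;
  args.beta = beta;
  args.pointer_mode = ScalarPointerMode::kHost;
}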
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/include/cutlass/library/library.h/0 | {
"file_path": "tools/library/include/cutlass/library/library.h",
"repo_id": "tools",
"token_count": 6719
} | 61 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
 * \brief CUDA kernels to transform a device memory tensor from NCHW layout to NHWC layout.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
namespace cutlass {
/** \brief interface to transform a device memory tensor from NCHW layout to NHWC layout.
* \tparam T: data type
*/
template <typename T>
void nchw_to_nhwc(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
TensorRef<T, layout::TensorNCHW> ref_input,
TensorRef<T, layout::TensorNHWC> ref_output,
cudaStream_t stream);
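// Kernel summary: each thread block transposes one 32x32 tile of the logical (C, H*W) matrix
// belonging to a single image n, staging the tile through shared memory with a 33-element row
// pitch so the transposed accesses avoid shared-memory bank conflicts. The launcher below uses
// blockDim = (32, 8), so each thread handles four tile elements; the boundary branches cover
// partial tiles at the C and H*W edges.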
template <typename T>
__global__ void nchw_to_nhwc_kernel(T *output,
const T *input,
const int n,
const int h,
const int w,
const int c) {
const int hw = h*w;
const int chw = c*hw;
__shared__ T shbuf[32 * (32 + 1)];
const int32_t tid = threadIdx.y*blockDim.x + threadIdx.x;
const int32_t wid = tid / 32;
const int32_t lid = tid % 32;
const int32_t ni = blockIdx.z;
const int32_t ci0 = blockIdx.y * 32;
const int32_t hwi0 = blockIdx.x * 32;
const size_t input_idx = ni * chw + (ci0 + wid) * hw + hwi0;
const T *A = input + input_idx;
if (hwi0 + lid < hw) {
const int lid_x_33 = lid * 33;
if ((ci0 + 32) <= c) {
int ci = wid; // between 0 and 7
CUTLASS_PRAGMA_UNROLL
for (int cLoopIdx = 0; cLoopIdx < 4; cLoopIdx++) {
shbuf[lid_x_33 + ci] = A[lid];
A = &A[8 * hw];
ci += 8;
}
} else {
for (int ci = wid; ci < 32; ci += 8) {
if ((ci + ci0) < c) {
shbuf[lid_x_33 + ci] = A[lid];
}
A = &A[8 * hw];
}
}
}
__syncthreads();
const int32_t ciOut = ci0 + lid;
output = &output[ni * chw + ciOut];
if (ciOut < c) {
if (hwi0 + 32 < hw) {
int hwI = wid;
CUTLASS_PRAGMA_UNROLL
for (int hwLoopIdx = 0; hwLoopIdx < 4; ++hwLoopIdx) {
output[(hwi0 + hwI) * c] = shbuf[(hwI)*33 + lid];
hwI += 8;
}
} else {
for (int hwI = wid; hwI < 32; hwI += 8) {
if (hwi0 + hwI < hw) {
output[(hwi0 + hwI) * c] = shbuf[(hwI)*33 + lid];
}
}
}
}
}
template <typename T>
void nchw_to_nhwc(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
TensorRef<T, layout::TensorNCHW> ref_input,
TensorRef<T, layout::TensorNHWC> ref_output,
cudaStream_t stream) {
assert(
input_tensor_size.n() == output_tensor_size.n() &&
input_tensor_size.c() == output_tensor_size.h() &&
input_tensor_size.h() == output_tensor_size.w() &&
input_tensor_size.w() == output_tensor_size.c());
int n = output_tensor_size.n();
int h = output_tensor_size.h();
int w = output_tensor_size.w();
int c = output_tensor_size.c();
dim3 grid((h*w + 31)/32, (c + 31)/32, n);
dim3 block(32, 8);
nchw_to_nhwc_kernel<<<grid, block, 0, stream>>>(ref_output.data(), ref_input.data(),
n, h, w, c);
}
} //namespace cutlass
| tools/util/include/cutlass/util/device_nchw_to_nhwc.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_nchw_to_nhwc.h",
"repo_id": "tools",
"token_count": 2302
} | 62 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <array>
#include <cassert>
#include <cmath>
#include <iostream>
#include <type_traits>
#include <cute/util/type_traits.hpp>
#include <cute/tensor.hpp>
#include <cute/numeric/numeric_types.hpp>
#include <cute/numeric/complex.hpp>
#include <cutlass/layout/layout.h>
// The computed infinity norm does not include
// any NaN row absolute-value sums.
struct matrix_inf_norm_result {
// Accumulate errors in double, as this is generally
// the highest precision that the examples use.
double inf_norm = 0.0;
bool found_nan = false;
};
// In theory, cute::Tensor<ViewEngine<T*>, T> could be treated as a view type,
// and thus passed by value (as std::span or std::string_view would be).
// However, generic cute::Tensor objects are more like containers
// and thus are best passed by reference or const reference.
template <typename EngineType, typename LayoutType>
matrix_inf_norm_result
matrix_inf_norm(cute::Tensor<EngineType, LayoutType> const& host_matrix)
{
using error_type = decltype(std::declval<matrix_inf_norm_result>().inf_norm);
using element_type = typename EngineType::value_type;
error_type inf_norm = 0.0;
bool found_nan = false;
// Computing the infinity norm requires that we be able
// to treat the input as a matrix, with rows and columns.
const int64_t num_rows = cute::size<0>(host_matrix);
const int64_t num_cols = cute::size<1>(host_matrix);
auto abs_fn = [] (element_type A_ij) {
if constexpr (not std::is_unsigned_v<element_type>) {
using std::abs;
return abs(A_ij);
}
else {
return A_ij;
}
};
for (int64_t i = 0; i < num_rows; ++i) {
error_type row_abs_sum = 0.0;
for(int64_t j = 0; j < num_cols; ++j) {
row_abs_sum += abs_fn(host_matrix(i, j));
}
if (std::isnan(row_abs_sum)) {
found_nan = true;
}
else {
inf_norm = row_abs_sum > inf_norm ? row_abs_sum : inf_norm;
}
}
return {inf_norm, found_nan};
}
// Infinity norm of (X - Y).
template <typename EngineType, typename LayoutType>
matrix_inf_norm_result
matrix_diff_inf_norm(cute::Tensor<EngineType, LayoutType> const& X,
cute::Tensor<EngineType, LayoutType> const& Y)
{
using error_type = decltype(std::declval<matrix_inf_norm_result>().inf_norm);
using element_type = typename EngineType::value_type;
auto abs_fn = [] (element_type A_ij) {
if constexpr (not std::is_unsigned_v<element_type>) {
using std::abs;
return abs(A_ij);
}
else {
return A_ij;
}
};
assert(cute::size<0>(X) == cute::size<0>(Y));
assert(cute::size<1>(X) == cute::size<1>(Y));
// Computing the infinity norm requires that we be able
// to treat the input as a matrix, with rows and columns.
const int64_t num_rows = cute::size<0>(X);
const int64_t num_cols = cute::size<1>(X);
error_type inf_norm = 0.0;
bool found_nan = false;
for (int64_t i = 0; i < num_rows; ++i) {
error_type row_abs_sum = 0.0;
for (int64_t j = 0; j < num_cols; ++j) {
row_abs_sum += error_type(abs_fn(element_type(X(i,j)) -
element_type(Y(i,j))));
}
if (std::isnan(row_abs_sum)) {
found_nan = true;
}
else {
inf_norm = row_abs_sum > inf_norm ? row_abs_sum : inf_norm;
}
}
return {inf_norm, found_nan};
}
template <typename EngineType_A, typename LayoutType_A,
typename EngineType_B, typename LayoutType_B,
typename EngineType_C, typename LayoutType_C,
typename EngineType_C_ref, typename LayoutType_C_ref>
auto
print_matrix_multiply_mollified_relative_error(
char const A_value_type_name[],
cute::Tensor<EngineType_A, LayoutType_A> const& A,
char const B_value_type_name[],
cute::Tensor<EngineType_B, LayoutType_B> const& B,
char const C_value_type_name[],
cute::Tensor<EngineType_C, LayoutType_C> const& C,
cute::Tensor<EngineType_C_ref, LayoutType_C_ref> const& C_ref)
{
const auto [A_norm, A_has_nan] = matrix_inf_norm(A);
const auto [B_norm, B_has_nan] = matrix_inf_norm(B);
const auto [C_norm, C_has_nan] = matrix_inf_norm(C_ref);
const auto [diff_norm, diff_has_nan] = matrix_diff_inf_norm(C, C_ref);
const auto A_norm_times_B_norm = A_norm * B_norm;
const auto relative_error = A_norm_times_B_norm == 0.0 ?
diff_norm : (diff_norm / A_norm_times_B_norm);
// For expected error bounds, please refer to the LAPACK Users' Guide,
// in particular https://netlib.org/lapack/lug/node108.html .
// Printing the infinity norm of C is a way to check
// that both the function being tested (C)
// and the reference implementation (C_ref)
// don't just do nothing (or fill with zeros).
using std::cout;
using cute::shape;
cout << "Matrix A: " << shape<0>(A) << "x" << shape<1>(A) << " of " << A_value_type_name << '\n'
<< "Matrix B: " << shape<0>(B) << "x" << shape<1>(B) << " of " << B_value_type_name << '\n'
<< "Matrix C: " << shape<0>(C) << "x" << shape<1>(C) << " of " << C_value_type_name << '\n'
<< std::scientific
<< "Infinity norm of A: " << A_norm << '\n'
<< "Infinity norm of B: " << B_norm << '\n'
<< "Infinity norm of C: " << C_norm << '\n'
<< "Infinity norm of (C - C_ref): " << diff_norm << '\n';
if(A_norm_times_B_norm == 0.0) {
cout << "Mollified relative error: " << relative_error << '\n';
} else {
cout << "Relative error: " << relative_error << '\n';
}
if (A_has_nan || B_has_nan || C_has_nan || diff_has_nan) {
cout << "Did we encounter NaN in A? " << (A_has_nan ? "yes" : "no") << '\n'
<< "Did we encounter NaN in B? " << (B_has_nan ? "yes" : "no") << '\n'
<< "Did we encounter NaN in C? " << (C_has_nan ? "yes" : "no") << '\n'
<< "Did we encounter NaN in (C - C_ref)? " << (diff_has_nan ? "yes" : "no") << '\n';
}
return relative_error;
}
template <typename EngineType, typename LayoutType>
auto
print_matrix_multiply_mollified_relative_error(
const char value_type_name[],
const cute::Tensor<EngineType, LayoutType>& A,
const cute::Tensor<EngineType, LayoutType>& B,
const cute::Tensor<EngineType, LayoutType>& C_computed,
const cute::Tensor<EngineType, LayoutType>& C_expected)
{
return print_matrix_multiply_mollified_relative_error(value_type_name, A, value_type_name, B,
value_type_name, C_computed, C_expected);
}
// Take a CUTLASS HostTensor (or the like) as input,
// and return a const CuTe Tensor.
// This is useful for use with the above error printing functions.
// This implicitly "transposes" if the layout is RowMajor.
// Note that the HostTensor must be captured by nonconst reference
// in order for X.host_ref().data() to compile.
// (CUTLASS is a bit more container-y than CuTe.)
template<class CutlassHostTensorType>
auto host_matrix_to_const_cute_tensor(CutlassHostTensorType& X)
{
// The tensors were created with post-transposed extents.
const auto extents = X.extent();
const auto shape = cute::Shape<int, int>{extents[0], extents[1]};
// Both RowMajor and ColumnMajor only store one stride.
const int LDX = X.stride(0);
const auto strides = [&]() {
using input_layout_type = typename std::decay_t<decltype(X)>::Layout;
if constexpr (std::is_same_v<input_layout_type, cutlass::layout::ColumnMajor>) {
return cute::Stride<int, int>{1, LDX};
}
else {
static_assert(std::is_same_v<input_layout_type, cutlass::layout::RowMajor>);
return cute::Stride<int, int>{LDX, 1};
}
}();
const auto layout = cute::make_layout(shape, strides);
auto X_data = X.host_ref().data();
auto X_data_const = const_cast<std::add_const_t< decltype(X_data)> >(X_data);
return cute::make_tensor(X_data_const, layout);
}
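// Hypothetical usage sketch (editor-added): combining the helpers above to report the mollified
// relative error of a GEMM test. A, B, C, and C_ref are assumed to be CUTLASS HostTensor objects
// whose host memory has already been synchronized from the device; the element-type names passed
// for printing are placeholders.
template <class TensorA, class TensorB, class TensorC>
double report_gemm_error_example(TensorA& A, TensorB& B, TensorC& C, TensorC& C_ref)
{
  auto A_cute     = host_matrix_to_const_cute_tensor(A);
  auto B_cute     = host_matrix_to_const_cute_tensor(B);
  auto C_cute     = host_matrix_to_const_cute_tensor(C);
  auto C_ref_cute = host_matrix_to_const_cute_tensor(C_ref);

  return print_matrix_multiply_mollified_relative_error(
    "ElementA", A_cute,
    "ElementB", B_cute,
    "ElementC", C_cute, C_ref_cute);
}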
// Returns EXIT_SUCCESS if the 2-norm relative error is exactly zero, else returns EXIT_FAILURE.
// This makes the return value suitable as the return value of main().
template <typename T1, typename T2>
int
print_relative_error(
std::size_t n,
T1 const& data,
T2 const& reference,
bool print_verbose = false,
bool print_error = true,
double error_margin = 0.00001) {
using std::abs; using std::sqrt;
// Use either double or complex<double> for error computation
using value_type = cute::remove_cvref_t<decltype(reference[0])>;
using error_type = std::conditional_t<cute::is_complex<value_type>::value,
cute::complex<double>,
double>;
if (print_verbose) {
std::cout << "Idx:\t"<< "Val\t" << "RefVal\t" << "RelError" << std::endl;
}
double eps = 1e-200;
double tot_error_sq = 0;
double tot_norm_sq = 0;
double tot_ind_rel_err = 0;
double max_ind_rel_err = 0;
double max_diff = 0;
for (std::size_t i = 0; i < n; ++i) {
error_type val = data[i];
error_type ref = reference[i];
double aref = abs(ref);
double diff = abs(ref - val);
double rel_error = diff / (aref + eps);
// Individual relative error
tot_ind_rel_err += rel_error;
// Maximum relative error
max_ind_rel_err = std::max(max_ind_rel_err, rel_error);
// Maximum delta in value error
max_diff = std::max(max_diff, diff);
// Total relative error
tot_error_sq += diff * diff;
tot_norm_sq += aref * aref;
if (print_verbose) {
std::cout << i << ":\t" << val << "\t" << ref << "\t" << rel_error << std::endl;
}
}
double ave_rel_err = tot_ind_rel_err / double(n);
  if (print_error) {
    printf("Average relative error: %.3e\n", ave_rel_err);
    printf("Maximum relative error: %.3e\n", max_ind_rel_err);
    printf("Maximum difference : %.3e\n", max_diff);
  }
double tot_rel_err = sqrt(tot_error_sq/(tot_norm_sq+eps));
  if (print_error) {
    printf("Vector relative error: %.3e\n", tot_rel_err);
    printf("Vector reference norm: %.3e\n", sqrt(tot_norm_sq));
  }
return (tot_rel_err <= error_margin) ? EXIT_SUCCESS : EXIT_FAILURE;
}
// Overload for cute::Tensor<>
template <class Engine, class Layout>
int
print_relative_error(
cute::Tensor<Engine, Layout> data,
cute::Tensor<Engine, Layout> reference,
bool print_verbose = false,
bool print_error = true,
double error_margin = 0.00001) {
assert(size(data) == size(reference));
return print_relative_error(static_cast<std::size_t>(size(data)),
data, reference,
print_verbose, print_error, error_margin);
}
| tools/util/include/cutlass/util/print_error.hpp/0 | {
"file_path": "tools/util/include/cutlass/util/print_error.hpp",
"repo_id": "tools",
"token_count": 4754
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines host-side elementwise operations on TensorView.
*/
#pragma once
// Standard Library includes
#include <utility>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "tensor_foreach.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Helper to convert between types
template <
typename DstElement,
typename SrcElement
>
struct TrivialConvert {
TrivialConvert() { }
DstElement operator()(SrcElement src) const {
return DstElement(src);
}
};
/// Helper to conditionally copy between tensor views.
template <
typename DstElement,
typename DstLayout,
typename SrcElement,
typename SrcLayout,
typename F
>
struct TensorCopyIf {
using DstTensorView = TensorView<DstElement, DstLayout>;
using SrcTensorView = TensorView<SrcElement, SrcLayout>;
//
// Data members
//
DstTensorView dst;
SrcTensorView src;
F convert;
//
// Methods
//
TensorCopyIf() { }
TensorCopyIf(
DstTensorView const &dst_,
SrcTensorView const &src_,
F const &convert_): dst(dst_), src(src_), convert(convert_) {}
/// Copies based on destination and source bounds
void operator()(Coord<DstLayout::kRank> const &coord) {
if (dst.contains(coord) && src.contains(coord)) {
dst.at(coord) = convert(src.at(coord));
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies elements from one tensor view into another, satisfying bounds of each tensor.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
typename SrcLayout, /// Source tensor's layout
typename F /// Transformation functor
>
void TensorCopy(
TensorView<DstElement, DstLayout> dst,
TensorView<SrcElement, SrcLayout> src,
F const &transform) {
using CopyIf = detail::TensorCopyIf<
DstElement,
DstLayout,
SrcElement,
SrcLayout,
F>;
CopyIf copy_if(dst, src, transform);
TensorForEach(dst.extent(), copy_if);
}
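/// Hypothetical usage sketch (editor-added): copying a double-precision source view into a
/// single-precision destination view while scaling each element. The TensorView objects and the
/// RowMajor layout are assumed to be provided by the includer (e.g. via cutlass/tensor_view.h and
/// cutlass/layout/matrix.h); the scaling functor is purely illustrative.
struct ScaleAndConvertExample {
  double scale;

  float operator()(double x) const {
    return float(scale * x);
  }
};

inline void tensor_copy_example(
  TensorView<float, layout::RowMajor> dst,
  TensorView<double, layout::RowMajor> src) {

  // Copies only the coordinates contained in both views, converting double -> float with scaling.
  TensorCopy(dst, src, ScaleAndConvertExample{0.5});
}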
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies elements from a TensorRef into a TensorView. Assumes source tensor has sufficient extent
/// to avoid out of bounds accesses.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
typename SrcLayout, /// Source tensor's layout
typename F /// Transformation functor
>
void TensorCopy(
TensorView<DstElement, DstLayout> dst,
TensorRef<SrcElement, SrcLayout> src,
F const &transform) {
using CopyIf = detail::TensorCopyIf<
DstElement,
DstLayout,
SrcElement,
SrcLayout,
F>;
TensorView<SrcElement, SrcLayout> src_view(src, dst.extent());
CopyIf copy_if(dst, src_view, transform);
TensorForEach(dst.extent(), copy_if);
}
/// Copies elements from a TensorView into a TensorRef. Assumes the destination tensor has
/// sufficient extent to avoid out of bounds accesses.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
typename SrcLayout, /// Source tensor's layout
typename F /// Transformation functor
>
void TensorCopy(
TensorRef<DstElement, DstLayout> dst,
TensorView<SrcElement, SrcLayout> src,
F const &transform) {
using CopyIf = detail::TensorCopyIf<
DstElement,
DstLayout,
SrcElement,
SrcLayout,
F>;
TensorView<DstElement, DstLayout> dst_view(dst, src.extent());
CopyIf copy_if(dst_view, src, transform);
TensorForEach(src.extent(), copy_if);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies elements from one tensor view into another, satisfying bounds of each tensor. Succeeds
/// if SrcElement can be converted to DstElement.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
typename SrcLayout /// Source tensor's layout
>
void TensorCopy(
TensorView<DstElement, DstLayout> dst,
TensorView<SrcElement, SrcLayout> src) {
detail::TrivialConvert<DstElement, SrcElement> convert;
TensorCopy(dst, src, convert);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies elements from a TensorRef into a TensorView, satisfying bounds of the destination
/// tensor. Succeeds if SrcElement can be converted to DstElement.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
  typename SrcLayout                  /// Source tensor's layout
>
void TensorCopy(
TensorView<DstElement, DstLayout> dst,
TensorRef<SrcElement, SrcLayout> src) {
detail::TrivialConvert<DstElement, SrcElement> convert;
TensorCopy(dst, src, convert);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies elements from a TensorView into a TensorRef, satisfying bounds of the source
/// tensor. Succeeds if SrcElement can be converted to DstElement.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
typename SrcLayout /// Source tensor's layout
>
void TensorCopy(
TensorRef<DstElement, DstLayout> dst,
TensorView<SrcElement, SrcLayout> src) {
detail::TrivialConvert<DstElement, SrcElement> convert;
TensorCopy(dst, src, convert);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/tensor_copy.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/tensor_copy.h",
"repo_id": "tools",
"token_count": 2602
} | 64 |
var searchData=
[
['kind',['Kind',['../structcutlass_1_1Distribution.html#a499f4023e0d42356ce71d38cc32bf92a',1,'cutlass::Distribution']]]
];
| docs/search/enums_3.js/0 | {
"file_path": "docs/search/enums_3.js",
"repo_id": "docs",
"token_count": 61
} | 0 |
var searchData=
[
['debug_2eh',['debug.h',['../tools_2util_2include_2cutlass_2util_2debug_8h.html',1,'']]],
['gemm_2eh',['gemm.h',['../tools_2util_2include_2cutlass_2util_2reference_2device_2gemm_8h.html',1,'']]],
['gemm_2eh',['gemm.h',['../tools_2util_2include_2cutlass_2util_2reference_2device_2thread_2gemm_8h.html',1,'']]],
['gemm_2eh',['gemm.h',['../tools_2util_2include_2cutlass_2util_2reference_2host_2gemm_8h.html',1,'']]],
['gemm_2eh',['gemm.h',['../tools_2util_2include_2cutlass_2util_2reference_2device_2kernel_2gemm_8h.html',1,'']]],
['gemm_5fcomplex_2eh',['gemm_complex.h',['../tools_2util_2include_2cutlass_2util_2reference_2host_2gemm__complex_8h.html',1,'']]],
['matrix_2eh',['matrix.h',['../thread_2matrix_8h.html',1,'']]],
['predicated_5ftile_5fiterator_2eh',['predicated_tile_iterator.h',['../transform_2threadblock_2predicated__tile__iterator_8h.html',1,'']]],
['tensor_2eh',['tensor.h',['../tensor_8h.html',1,'']]],
['tensor_5fcoord_2eh',['tensor_coord.h',['../tensor__coord_8h.html',1,'']]],
['tensor_5fcopy_2eh',['tensor_copy.h',['../tensor__copy_8h.html',1,'']]],
['tensor_5fnorm_2eh',['tensor_norm.h',['../tensor__norm_8h.html',1,'']]],
['tensor_5fop_5fmultiplicand_5fsm70_2eh',['tensor_op_multiplicand_sm70.h',['../tensor__op__multiplicand__sm70_8h.html',1,'']]],
['tensor_5fop_5fmultiplicand_5fsm75_2eh',['tensor_op_multiplicand_sm75.h',['../tensor__op__multiplicand__sm75_8h.html',1,'']]],
['tensor_5fop_5fpolicy_2eh',['tensor_op_policy.h',['../tensor__op__policy_8h.html',1,'']]],
['tensor_5fref_2eh',['tensor_ref.h',['../tensor__ref_8h.html',1,'']]],
['tensor_5fview_2eh',['tensor_view.h',['../tensor__view_8h.html',1,'']]],
['tensor_5fview_5fio_2eh',['tensor_view_io.h',['../tensor__view__io_8h.html',1,'']]],
['tile_5fiterator_5fsimt_2eh',['tile_iterator_simt.h',['../tile__iterator__simt_8h.html',1,'']]],
['tile_5fiterator_5ftensor_5fop_2eh',['tile_iterator_tensor_op.h',['../tile__iterator__tensor__op_8h.html',1,'']]],
['tile_5fiterator_5fvolta_5ftensor_5fop_2eh',['tile_iterator_volta_tensor_op.h',['../tile__iterator__volta__tensor__op_8h.html',1,'']]],
['tile_5fiterator_5fwmma_5ftensor_5fop_2eh',['tile_iterator_wmma_tensor_op.h',['../tile__iterator__wmma__tensor__op_8h.html',1,'']]],
['transpose_2eh',['transpose.h',['../transpose_8h.html',1,'']]],
['type_5ftraits_2eh',['type_traits.h',['../type__traits_8h.html',1,'']]]
];
| docs/search/files_11.js/0 | {
"file_path": "docs/search/files_11.js",
"repo_id": "docs",
"token_count": 1119
} | 1 |
var searchData=
[
['half_2eh',['half.h',['../half_8h.html',1,'']]],
['host_5freorder_2eh',['host_reorder.h',['../host__reorder_8h.html',1,'']]],
['host_5ftensor_2eh',['host_tensor.h',['../host__tensor_8h.html',1,'']]],
['tensor_5fcompare_2eh',['tensor_compare.h',['../host_2tensor__compare_8h.html',1,'']]],
['tensor_5felementwise_2eh',['tensor_elementwise.h',['../host_2tensor__elementwise_8h.html',1,'']]],
['tensor_5ffill_2eh',['tensor_fill.h',['../host_2tensor__fill_8h.html',1,'']]],
['tensor_5fforeach_2eh',['tensor_foreach.h',['../host_2tensor__foreach_8h.html',1,'']]]
];
| docs/search/files_7.js/0 | {
"file_path": "docs/search/files_7.js",
"repo_id": "docs",
"token_count": 276
} | 2 |
var searchData=
[
['real_2eh',['real.h',['../real_8h.html',1,'']]],
['reduce_2eh',['reduce.h',['../reduce_8h.html',1,'']]],
['reduce_5fsplit_5fk_2eh',['reduce_split_k.h',['../reduce__split__k_8h.html',1,'']]],
['reduction_5fop_2eh',['reduction_op.h',['../reduction__op_8h.html',1,'']]],
['reduction_5foperators_2eh',['reduction_operators.h',['../reduction__operators_8h.html',1,'']]],
['regular_5ftile_5faccess_5fiterator_2eh',['regular_tile_access_iterator.h',['../regular__tile__access__iterator_8h.html',1,'']]],
['regular_5ftile_5faccess_5fiterator_5fpitch_5flinear_2eh',['regular_tile_access_iterator_pitch_linear.h',['../regular__tile__access__iterator__pitch__linear_8h.html',1,'']]],
['regular_5ftile_5faccess_5fiterator_5ftensor_5fop_2eh',['regular_tile_access_iterator_tensor_op.h',['../regular__tile__access__iterator__tensor__op_8h.html',1,'']]],
['regular_5ftile_5fiterator_2eh',['regular_tile_iterator.h',['../regular__tile__iterator_8h.html',1,'']]],
['regular_5ftile_5fiterator_5fpitch_5flinear_2eh',['regular_tile_iterator_pitch_linear.h',['../regular__tile__iterator__pitch__linear_8h.html',1,'']]],
['regular_5ftile_5fiterator_5fpitch_5flinear_5f2dthreadtile_2eh',['regular_tile_iterator_pitch_linear_2dthreadtile.h',['../regular__tile__iterator__pitch__linear__2dthreadtile_8h.html',1,'']]],
['regular_5ftile_5fiterator_5ftensor_5fop_2eh',['regular_tile_iterator_tensor_op.h',['../regular__tile__iterator__tensor__op_8h.html',1,'']]],
['regular_5ftile_5fiterator_5ftensor_5fop_5fsm70_2eh',['regular_tile_iterator_tensor_op_sm70.h',['../regular__tile__iterator__tensor__op__sm70_8h.html',1,'']]],
['relatively_5fequal_2eh',['relatively_equal.h',['../relatively__equal_8h.html',1,'']]],
['threadblock_5fswizzle_2eh',['threadblock_swizzle.h',['../reduction_2threadblock__swizzle_8h.html',1,'']]]
];
| docs/search/files_f.js/0 | {
"file_path": "docs/search/files_f.js",
"repo_id": "docs",
"token_count": 767
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS layout visualization example
*/
#include <map>
#include <memory>
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm70.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "visualize_layout.h"
#include "register_layout.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
void RegisterLayouts(std::map<std::string, std::unique_ptr<VisualizeLayoutBase> > &layouts) {
struct {
char const *name;
VisualizeLayoutBase *ptr;
} layout_pairs[] = {
{"PitchLinear", new VisualizeLayout<cutlass::layout::PitchLinear>},
{"ColumnMajor", new VisualizeLayout<cutlass::layout::ColumnMajor>},
{"RowMajor", new VisualizeLayout<cutlass::layout::RowMajor>},
{"ColumnMajorInterleaved<4>",
new VisualizeLayout<cutlass::layout::ColumnMajorInterleaved<4>>},
{"RowMajorInterleaved<4>",
new VisualizeLayout<cutlass::layout::RowMajorInterleaved<4>>},
// All Ampere/Turing H/Integer matrix multiply tensor core kernels use the same swizzling
// layout implementation with different templates.
//
// mma.sync.aligned.m8n8k128.s32.b1.b1.s32 Interleaved-256
// mma.sync.aligned.m16n8k256.s32.b1.b1.s32 Interleaved-256
{"TensorOpMultiplicand<1,256>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<1, 256>>},
// mma.sync.aligned.m8n8k128.s32.b1.b1.s32 TN kblock512
// mma.sync.aligned.m16n8k256.s32.b1.b1.s32 TN kblock512
{"TensorOpMultiplicand<1,512>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<1, 512>>},
// mma.sync.aligned.m16n8k256.s32.b1.b1.s32 TN kblock1024
{"TensorOpMultiplicand<1,1024>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<1, 1024>>},
// Integer matrix multiply.int4 8832 Interleaved-64
// Integer matrix multiply.int4 16864 Interleaved-64
{"TensorOpMultiplicand<4,64>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<4, 64>>},
// Integer matrix multiply.int4 8832 TN kblock128
// Integer matrix multiply.int4 16864 TN kblock128
{"TensorOpMultiplicand<4,128>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<4, 128>>},
// Integer matrix multiply.int4 16864 TN kblock256
{"TensorOpMultiplicand<4,256>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<4, 256>>},
// Integer matrix multiply 8816 Interleaved-32
// Integer matrix multiply 16832 Interleaved-32
{"TensorOpMultiplicand<8,32>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<8, 32>>},
// Integer matrix multiply 8816 TN kblock64
// Integer matrix multiply 16832 TN kblock64
{"TensorOpMultiplicand<8,64>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<8, 64>>},
// Integer matrix multiply 16832 TN kblock128
{"TensorOpMultiplicand<8,128>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<8, 128>>},
// Matrix Multiply 1688 TN kblock32
// Matrix multiply 16816 TN kblock32
{"TensorOpMultiplicand<16,32>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<16, 32>>},
// Matrix multiply 1688 NT
// Matrix multiply 16816 NT
// Matrix multiply 16816 TN kblock64
{"TensorOpMultiplicand<16,64>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<16, 64>>},
// Matrix multiply 1688.TF32 TN kblock16
{"TensorOpMultiplicand<32,16>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<32, 16>>},
// Matrix multiply 1688.TF32 TN kblock32
{"TensorOpMultiplicand<32,32>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<32, 32>>},
// Matrix multiply 1688 NT
{"TensorOpMultiplicandCongruous<32,32>",
new VisualizeLayout<
cutlass::layout::TensorOpMultiplicandCongruous<32, 32>>},
// Matrix multiply 884 NT
{"TensorOpMultiplicandCongruous<64,16>",
new VisualizeLayout<
cutlass::layout::TensorOpMultiplicandCongruous<64, 16>>},
// Matrix multiply 884 TN
{"TensorOpMultiplicand64bCrosswise",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand64bCrosswise>},
{"TensorOpMultiplicandCongruous<128,4>",
new VisualizeLayout<
cutlass::layout::TensorOpMultiplicandCongruous<128, 4>>},
{"TensorOpMultiplicandCrosswise<128,4>",
new VisualizeLayout<
cutlass::layout::TensorOpMultiplicandCrosswise<128, 4>>},
{"VoltaTensorOpMultiplicandCongruous<16>",
new VisualizeLayout<
cutlass::layout::VoltaTensorOpMultiplicandCongruous<16>>},
{"VoltaTensorOpMultiplicandCrosswise<16,32>",
new VisualizeLayout<
cutlass::layout::VoltaTensorOpMultiplicandCrosswise<16, 32>>}
};
for (auto layout : layout_pairs) {
layouts.emplace(std::string(layout.name), std::unique_ptr<VisualizeLayoutBase>(layout.ptr));
}
}
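// Usage sketch (illustrative; the driver that actually consumes this map lives elsewhere in
// this example, and the VisualizeLayoutBase interface is declared in visualize_layout.h):
//
//   std::map<std::string, std::unique_ptr<VisualizeLayoutBase> > layouts;
//   RegisterLayouts(layouts);
//
//   auto it = layouts.find("TensorOpMultiplicand<16,64>");
//   if (it != layouts.end()) {
//     // it->second owns the visualizer for the requested swizzled layout
//   }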
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/03_visualize_layout/register_layout.cu/0 | {
"file_path": "examples/03_visualize_layout/register_layout.cu",
"repo_id": "examples",
"token_count": 2565
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Template for a pipelined back-to-back (B2B) fused GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
#include "kernel/b2b_gemm_grouped_problem_visitor.h"
#include "threadblock/grouped_threadblock_swizzle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail {
/// Utility struct for returning the type of the problem visitor used by the swizzling function,
/// if it is a grouped swizzling function, or a default visitor. This is used only for defining
/// the parameters of the problem visitor used in GroupedParams.
template <
typename B2bMma_,
typename ThreadblockSwizzle_,
typename Enable = void
>
struct ProblemVisitorOrDefault;
/// Return a generic problem visitor for GEMM problems
template <
typename B2bMma_,
typename ThreadblockSwizzle_
>
struct ProblemVisitorOrDefault<B2bMma_,
ThreadblockSwizzle_,
typename platform::enable_if<
! cutlass::gemm::threadblock::detail::IsGroupedSwizzle<ThreadblockSwizzle_>::value
>::type> {
using value = B2bGemmGroupedProblemVisitor<typename B2bMma_::Shape,
GroupScheduleMode::kDeviceOnly,
128,
128,
platform::is_same<typename B2bMma_::LayoutC,
cutlass::layout::ColumnMajor>::value>;
};
/// Return the problem visitor specified by the swizzling function
template <
typename B2bMma_,
typename ThreadblockSwizzle_
>
struct ProblemVisitorOrDefault<B2bMma_,
ThreadblockSwizzle_,
typename platform::enable_if<
cutlass::gemm::threadblock::detail::IsGroupedSwizzle<ThreadblockSwizzle_>::value
>::type> {
using value = typename ThreadblockSwizzle_::ProblemVisitor;
};
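// Usage sketch (illustrative): the selected visitor type is obtained as
//
//   using Visitor = typename detail::ProblemVisitorOrDefault<B2bMma, ThreadblockSwizzle>::value;
//
// which is how GroupedParams::ProblemVisitor is defined later in this file.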
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename B2bMma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct B2bGemm {
using B2bMma = B2bMma_;
using Epilogue = Epilogue_;
using OutputOp0 = typename B2bMma::OutputOp;
using OutputOp1 = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA0 = typename B2bMma::IteratorA0::Element;
using LayoutA0 = typename B2bMma::IteratorA0::Layout;
using ElementB0 = typename B2bMma::IteratorB0::Element;
using LayoutB0 = typename B2bMma::IteratorB0::Layout;
using ElementB1 = typename B2bMma::IteratorB1::Element;
using LayoutB1 = typename B2bMma::IteratorB1::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using ScaleBiasData = typename B2bMma::IteratorAccumulatorScaleBias::Element;
/// Data types needed for higher-level containers. In some cases, a single type must be exposed
/// despite the B2b GEMM using two GEMMs under the hood. In such cases, we select the values from
/// the second GEMM (other than for ElementA/ElementB)
using ElementA = typename B2bMma::IteratorA0::Element;
using LayoutA = typename B2bMma::IteratorA0::Layout;
using ElementB = typename B2bMma::IteratorB0::Element;
using LayoutB = typename B2bMma::IteratorB0::Layout;
static ComplexTransform const kTransformA = B2bMma::kTransformA;
static ComplexTransform const kTransformB = B2bMma::kTransformB;
using Operator = typename B2bMma::Operator0;
using OperatorClass = typename Operator::OperatorClass;
using ThreadblockShape = typename B2bMma::Shape0;
using WarpShape = typename Operator::Shape;
using InstructionShape = typename Operator::InstructionShape;
using ArchTag = typename B2bMma::ArchTag;
static int const kStages = B2bMma::kStages;
static int const kAlignmentA = B2bMma::IteratorA::AccessType::kElements;
static int const kAlignmentB = B2bMma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
using Mma = B2bMma;
using EpilogueOutputOp = OutputOp1;
/// Warp count (concept: GemmShape)
using WarpCount0 = typename B2bMma::WarpCount0;
static int const kThreadCount = 32 * WarpCount0::kCount;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
GemmCoord problem_size_0{0,0,0};
GemmCoord problem_size_1{0,0,0};
typename B2bMma::IteratorA0::TensorRef ref_A0{};
typename B2bMma::IteratorB0::TensorRef ref_B0{};
typename Epilogue::OutputTileIterator::TensorRef ref_C0{};
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0{};
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0{};
typename B2bMma::IteratorB1::TensorRef ref_B1{};
typename Epilogue::OutputTileIterator::TensorRef ref_C1{};
typename Epilogue::OutputTileIterator::TensorRef ref_D1{};
int64_t batch_stride_A0{0};
int64_t batch_stride_B0{0};
int64_t batch_stride_B1{0};
int64_t batch_stride_C1{0};
int64_t batch_stride_D1{0};
int64_t batch_stride_Bias0{0};
int64_t batch_stride_Scale0{0};
typename OutputOp0::Params epilogue0 {};
typename OutputOp1::Params epilogue1 {};
int batch_count{1};
//
// Methods
//
/// Default ctor
Arguments() = default;
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmUniversalMode mode_,
GemmCoord problem_size_0_,
GemmCoord problem_size_1_,
typename B2bMma::IteratorA0::TensorRef ref_A0_,
typename B2bMma::IteratorB0::TensorRef ref_B0_,
typename Epilogue::OutputTileIterator::TensorRef ref_C0_,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0_,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0_,
typename B2bMma::IteratorB1::TensorRef ref_B1_,
typename Epilogue::OutputTileIterator::TensorRef ref_C1_,
typename Epilogue::OutputTileIterator::TensorRef ref_D1_,
int64_t batch_stride_A0_,
int64_t batch_stride_B0_,
int64_t batch_stride_B1_,
int64_t batch_stride_C1_,
int64_t batch_stride_D1_,
int64_t batch_stride_Bias0_,
int64_t batch_stride_Scale0_,
typename OutputOp0::Params epilogue0_ = typename OutputOp0::Params(),
typename OutputOp1::Params epilogue1_ = typename OutputOp1::Params(),
int batch_count_ = 1
):
mode(mode_),
problem_size_0(problem_size_0_),
problem_size_1(problem_size_1_),
ref_A0(ref_A0_),
ref_B0(ref_B0_),
ref_C0(ref_C0_),
ref_Scale0(ref_Scale0_),
ref_Bias0(ref_Bias0_),
ref_B1(ref_B1_),
ref_C1(ref_C1_),
ref_D1(ref_D1_),
batch_stride_A0(batch_stride_A0_),
batch_stride_B0(batch_stride_B0_),
batch_stride_B1(batch_stride_B1_),
batch_stride_C1(batch_stride_C1_),
batch_stride_D1(batch_stride_D1_),
batch_stride_Bias0(batch_stride_Bias0_),
batch_stride_Scale0(batch_stride_Scale0_),
epilogue0(epilogue0_),
epilogue1(epilogue1_),
batch_count(batch_count_) {
}
};
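// Usage sketch (illustrative; tensor references, extents, and epilogue scalars below are
// placeholders, not values taken from this file):
//
//   typename B2bGemm::Arguments args(
//     cutlass::gemm::GemmUniversalMode::kGemm,
//     {M, N0, K0},            // first GEMM:  M x N0 x K0
//     {M, N1, N0},            // second GEMM: M x N1 x K1, where K1 == N0
//     ref_A0, ref_B0, ref_C0, ref_Scale0, ref_Bias0, ref_B1, ref_C1, ref_D1,
//     0, 0, 0, 0, 0, 0, 0,    // batch strides (single-batch case)
//     {alpha0, beta0}, {alpha1, beta1});
//
// can_implement() below checks the fusion constraints, e.g. problem_size_0.n() == problem_size_1.k().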
// Arguments structure for grouped B2B problems
struct GroupedArguments {
GemmCoord* problem_size_0;
GemmCoord* problem_size_1;
typename B2bMma::IteratorA0::TensorRef* ref_A0;
typename B2bMma::IteratorB0::TensorRef* ref_B0;
typename Epilogue::OutputTileIterator::TensorRef* ref_C0;
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Scale0;
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Bias0;
typename B2bMma::IteratorB1::TensorRef* ref_B1;
typename Epilogue::OutputTileIterator::TensorRef* ref_C1;
typename Epilogue::OutputTileIterator::TensorRef* ref_D1;
// Epilogue params remain constant across all problems in the group. Thus,
// the parameter here is not a pointer.
typename OutputOp0::Params epilogue0;
typename OutputOp1::Params epilogue1;
int problem_count;
int threadblock_count;
GemmCoord* host_problem_sizes;
CUTLASS_HOST_DEVICE
GroupedArguments(
int problem_count,
GemmCoord* problem_size_0_,
GemmCoord* problem_size_1_,
typename B2bMma::IteratorA0::TensorRef* ref_A0_,
typename B2bMma::IteratorB0::TensorRef* ref_B0_,
typename Epilogue::OutputTileIterator::TensorRef* ref_C0_,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Scale0_,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Bias0_,
typename B2bMma::IteratorB1::TensorRef* ref_B1_,
typename Epilogue::OutputTileIterator::TensorRef* ref_C1_,
typename Epilogue::OutputTileIterator::TensorRef* ref_D1_,
typename OutputOp0::Params epilogue0_ = typename OutputOp0::Params(),
typename OutputOp1::Params epilogue1_ = typename OutputOp1::Params(),
int threadblock_count = 0
) : problem_size_0(problem_size_0_), problem_size_1(problem_size_1_),
ref_A0(ref_A0_), ref_B0(ref_B0_), ref_C0(ref_C0_),
ref_Scale0(ref_Scale0_), ref_Bias0(ref_Bias0_), ref_B1(ref_B1_),
ref_C1(ref_C1_), ref_D1(ref_D1_), epilogue0(epilogue0_), epilogue1(epilogue1_),
problem_count(problem_count),
threadblock_count(threadblock_count)
{}
};
/// Parameters structure
struct Params {
cutlass::gemm::GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
cutlass::gemm::GemmCoord problem_size_0{};
cutlass::gemm::GemmCoord problem_size_1{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
typename B2bMma::IteratorA0::Params params_A0{};
typename B2bMma::IteratorA0::TensorRef ref_A0{};
typename B2bMma::IteratorB0::Params params_B0{};
typename B2bMma::IteratorB0::TensorRef ref_B0{};
typename Epilogue::OutputTileIterator::Params params_C0{};
typename Epilogue::OutputTileIterator::TensorRef ref_C0{};
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0{};
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0{};
typename B2bMma::IteratorB1::Params params_B1{};
typename B2bMma::IteratorB1::TensorRef ref_B1{};
typename Epilogue::OutputTileIterator::Params params_C1{};
typename Epilogue::OutputTileIterator::TensorRef ref_C1{};
typename Epilogue::OutputTileIterator::Params params_D1{};
typename Epilogue::OutputTileIterator::TensorRef ref_D1{};
typename OutputOp0::Params output_op_0{};
typename OutputOp1::Params output_op_1{};
int64_t batch_stride_A0{0};
int64_t batch_stride_B0{0};
int64_t batch_stride_B1{0};
int64_t batch_stride_C1{0};
int64_t batch_stride_D1{0};
int64_t batch_stride_Bias0{0};
int64_t batch_stride_Scale0{0};
int *semaphore = nullptr;
int gemm_k_iterations_0{0};
int gemm_k_size_0{0};
int gemm_k_iterations_1{0};
int gemm_k_size_1{0};
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord const & problem_size_0,
cutlass::gemm::GemmCoord const & problem_size_1,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename B2bMma::IteratorA0::TensorRef ref_A0,
typename B2bMma::IteratorB0::TensorRef ref_B0,
typename Epilogue::OutputTileIterator::TensorRef ref_C0,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0,
typename B2bMma::IteratorB1::TensorRef ref_B1,
typename Epilogue::OutputTileIterator::TensorRef ref_C1,
typename Epilogue::OutputTileIterator::TensorRef ref_D1,
int64_t batch_stride_A0,
int64_t batch_stride_B0,
int64_t batch_stride_B1,
int64_t batch_stride_C1,
int64_t batch_stride_D1,
int64_t batch_stride_Bias0,
int64_t batch_stride_Scale0,
typename OutputOp0::Params output_op_0 = typename OutputOp0::Params(),
typename OutputOp1::Params output_op_1 = typename OutputOp1::Params(),
int *workspace = nullptr
):
mode(mode),
problem_size_0(problem_size_0),
problem_size_1(problem_size_1),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle::get_log_tile(grid_tiled_shape)),
params_A0(ref_A0.layout()),
ref_A0(ref_A0),
params_B0(ref_B0.layout()),
ref_B0(ref_B0),
params_C0(ref_C0.layout()),
ref_C0(ref_C0),
ref_Scale0(ref_Scale0),
ref_Bias0(ref_Bias0),
params_B1(ref_B1.layout()),
ref_B1(ref_B1),
params_C1(ref_C1.layout()),
ref_C1(ref_C1),
params_D1(ref_D1.layout()),
ref_D1(ref_D1),
batch_stride_A0(batch_stride_A0),
batch_stride_B0(batch_stride_B0),
batch_stride_B1(batch_stride_B1),
batch_stride_C1(batch_stride_C1),
batch_stride_D1(batch_stride_D1),
batch_stride_Bias0(batch_stride_Bias0),
batch_stride_Scale0(batch_stride_Scale0),
output_op_0(output_op_0),
output_op_1(output_op_1) {
int total_gemm_k_iterations_0 = (problem_size_0.k() + B2bMma::Shape0::kK - 1) / B2bMma::Shape0::kK;
int gemm_k_iterations_0 = (total_gemm_k_iterations_0 + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size_0 = gemm_k_iterations_0 * B2bMma::Shape0::kK;
int total_gemm_k_iterations_1 = (problem_size_1.k() + B2bMma::Shape1::kK - 1) / B2bMma::Shape1::kK;
int gemm_k_iterations_1 = (total_gemm_k_iterations_1 + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size_1 = gemm_k_iterations_1 * B2bMma::Shape1::kK;
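      // Example (illustrative): if problem_size_0.k() = 512, Shape0::kK = 32, and
      // grid_tiled_shape.k() = 2 (two split-K partitions), then total_gemm_k_iterations_0 = 16,
      // gemm_k_iterations_0 = 8, and gemm_k_size_0 = 256 K-elements per partition.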
semaphore = workspace;
}
};
struct GroupedParams {
cutlass::gemm::GemmCoord* problem_size_0;
cutlass::gemm::GemmCoord* problem_size_1;
cutlass::gemm::GemmCoord* grid_tiled_shape;
typename B2bMma::IteratorA0::TensorRef* ref_A0;
typename B2bMma::IteratorB0::TensorRef* ref_B0;
typename Epilogue::OutputTileIterator::TensorRef* ref_C0;
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Scale0;
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Bias0;
typename B2bMma::IteratorB1::TensorRef* ref_B1;
typename Epilogue::OutputTileIterator::TensorRef* ref_C1;
typename Epilogue::OutputTileIterator::TensorRef* ref_D1;
// Epilogue params remain constant across all problems in the group. Thus,
// the parameter here is not a pointer.
typename OutputOp0::Params output_op_0;
typename OutputOp1::Params output_op_1;
using ProblemVisitor = typename detail::ProblemVisitorOrDefault<B2bMma, ThreadblockSwizzle>::value;
typename ProblemVisitor::Params problem_visitor;
int threadblock_count;
int* workspace;
CUTLASS_HOST_DEVICE
GroupedParams() {}
CUTLASS_HOST_DEVICE
GroupedParams(
GroupedArguments const &args,
void *workspace = nullptr,
int tile_count = 0
) :
problem_size_0(args.problem_size_0), problem_size_1(args.problem_size_1),
ref_A0(args.ref_A0), ref_B0(args.ref_B0), ref_C0(args.ref_C0),
ref_Scale0(args.ref_Scale0), ref_Bias0(args.ref_Bias0), ref_B1(args.ref_B1), ref_C1(args.ref_C1), ref_D1(args.ref_D1),
output_op_0(args.epilogue0), output_op_1(args.epilogue1),
problem_visitor(args.problem_size_0, args.problem_size_1, args.problem_count, workspace, tile_count),
threadblock_count(args.threadblock_count),
workspace(reinterpret_cast<int*>(workspace)) {}
CUTLASS_HOST_DEVICE
void transpose() {
// Only row-major outputs are currently supported, so no transpose is performed
}
/// Returns non-grouped parameters to be used as input to the kernel-level
/// operator for the problem indicated by problem_visitor.
CUTLASS_HOST_DEVICE
Params to_single_params(const ProblemVisitor& problem_visitor) const {
GemmCoord problem_size0 = problem_visitor.problem_size0();
GemmCoord problem_size1 = problem_visitor.problem_size1();
int32_t idx = problem_visitor.problem_index();
GemmCoord grid_shape = problem_visitor.grid_shape(problem_size1);
return Params(
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size0,
problem_size1,
grid_shape,
ref_A0[idx],
ref_B0[idx],
ref_C0[idx],
ref_Scale0[idx],
ref_Bias0[idx],
ref_B1[idx],
ref_C1[idx],
ref_D1[idx],
0, 0, 0, 0, 0, 0, 0, // Batched B2B GEMMs within the grouped kernel are currently unsupported
output_op_0,
output_op_1,
workspace
);
}
};
/// Shared memory storage structure
union SharedStorage {
typename B2bMma::B2bMmaSharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
B2bGemm() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size_0,
cutlass::gemm::GemmCoord const & problem_size_1,
typename B2bMma::IteratorA0::TensorRef ref_A0,
typename B2bMma::IteratorB0::TensorRef ref_B0,
typename Epilogue::OutputTileIterator::TensorRef ref_C0,
typename B2bMma::IteratorB1::TensorRef ref_B1,
typename Epilogue::OutputTileIterator::TensorRef ref_C1,
typename Epilogue::OutputTileIterator::TensorRef ref_D1) {
static int const kAlignmentA = B2bMma::IteratorA0::AccessType::kElements;
static int const kAlignmentB = B2bMma::IteratorB0::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if (!TensorRef_aligned(ref_A0, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B0, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C0, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B1, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C1, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D1, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size_0.m() % kAlignmentA) || (problem_size_0.k() % kAlignmentA) ||
(problem_size_0.n() % kAlignmentB) || (problem_size_0.k() % kAlignmentB) ||
(problem_size_0.m() % kAlignmentC) || (problem_size_0.n() % kAlignmentC) ||
(problem_size_1.m() % kAlignmentA) || (problem_size_1.k() % kAlignmentA) ||
(problem_size_1.n() % kAlignmentB) || (problem_size_1.k() % kAlignmentB) ||
(problem_size_1.m() % kAlignmentC) || (problem_size_1.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
// Determine if fusion sizes are valid
if(problem_size_0.m() != problem_size_1.m())
return Status::kErrorInvalidProblem;
if(problem_size_0.n() != problem_size_1.k())
return Status::kErrorInvalidProblem;
if(problem_size_0.n() > B2bMma::Shape0::kN)
return Status::kErrorInvalidProblem;
if(problem_size_1.n() > B2bMma::Shape1::kN)
return Status::kErrorInvalidProblem;
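    // Example (illustrative): assuming Shape0 and Shape1 are 128x128xK threadblock tiles, a valid
    // fused pair is problem_size_0 = {M, 128, K0} and problem_size_1 = {M, 128, 128}: the first
    // GEMM's N extent becomes the second GEMM's K extent, and both N extents fit in one tile.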
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
ThreadblockSwizzle threadblock_swizzle;
run_with_swizzle(params, shared_storage, threadblock_swizzle);
}
/// Executes one GEMM with an externally-provided swizzling function
CUTLASS_DEVICE
void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) {
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
ElementA0 *ptr_A0 = static_cast<ElementA0 *>(params.ref_A0.data());
ElementB0 *ptr_B0 = static_cast<ElementB0 *>(params.ref_B0.data());
ElementB1 *ptr_B1 = static_cast<ElementB1 *>(params.ref_B1.data());
ScaleBiasData *ptr_Bias0 = static_cast<ScaleBiasData *>(params.ref_Bias0.data());
ScaleBiasData *ptr_Scale0 = static_cast<ScaleBiasData *>(params.ref_Scale0.data());
int offset_k_0 = 0;
int offset_k_1 = 0;
int problem_size_k_0 = params.problem_size_0.k();
int problem_size_k_1 = params.problem_size_1.k();
if (params.mode == GemmUniversalMode::kGemm) {
// Problem size is a function of threadblock index in the K dimension
problem_size_k_0 = min(
problem_size_k_0,
(threadblock_tile_offset.k() + 1) * params.gemm_k_size_0);
// Problem size is a function of threadblock index in the K dimension
problem_size_k_1 = min(
problem_size_k_1,
(threadblock_tile_offset.k() + 1) * params.gemm_k_size_1);
offset_k_0 = threadblock_tile_offset.k() * params.gemm_k_size_0;
offset_k_1 = threadblock_tile_offset.k() * params.gemm_k_size_1;
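      // Example (illustrative): with gemm_k_size_0 = 256 and threadblock_tile_offset.k() = 1,
      // this threadblock covers K indices [256, min(512, K0)) of the first GEMM.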
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A0 += threadblock_tile_offset.k() * params.batch_stride_A0;
ptr_B0 += threadblock_tile_offset.k() * params.batch_stride_B0;
ptr_B1 += threadblock_tile_offset.k() * params.batch_stride_B1;
ptr_Bias0 += threadblock_tile_offset.k() * params.batch_stride_Bias0;
ptr_Scale0 += threadblock_tile_offset.k() * params.batch_stride_Scale0;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A0{
threadblock_tile_offset.m() * B2bMma::Shape0::kM,
offset_k_0,
};
cutlass::MatrixCoord tb_offset_B0{
offset_k_0,
threadblock_tile_offset.n() * B2bMma::Shape0::kN
};
cutlass::MatrixCoord tb_offset_B1{
offset_k_1,
threadblock_tile_offset.n() * B2bMma::Shape1::kN
};
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations_0 = (problem_size_k_0 - tb_offset_A0.column() + B2bMma::Shape0::kK - 1) / B2bMma::Shape0::kK;
// Compute threadblock-scoped matrix multiply-add
// int gemm_k_iterations_1 = (problem_size_k_1 - tb_offset_B1.row() + B2bMma::Shape1::kK - 1) / B2bMma::Shape1::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename B2bMma::IteratorA0 iterator_A0(
params.params_A0,
ptr_A0,
{params.problem_size_0.m(), problem_size_k_0},
thread_idx,
tb_offset_A0);
typename B2bMma::IteratorB0 iterator_B0(
params.params_B0,
ptr_B0,
{problem_size_k_0, params.problem_size_0.n()},
thread_idx,
tb_offset_B0);
typename B2bMma::IteratorB1 iterator_B1(
params.params_B1,
ptr_B1,
{problem_size_k_1, params.problem_size_1.n()},
thread_idx,
tb_offset_B1);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
// Construct iterators to accumulator scale/bias vector
typename B2bMma::IteratorAccumulatorScaleBias iterator_Scale0(
ptr_Scale0,
{1, params.problem_size_0.n()},
thread_idx,
warp_idx,
MatrixCoord(
0, threadblock_tile_offset.n() * B2bMma::Shape0::kN
)
);
typename B2bMma::IteratorAccumulatorScaleBias iterator_Bias0(
ptr_Bias0,
{1, params.problem_size_0.n()},
thread_idx,
warp_idx,
MatrixCoord(
0, threadblock_tile_offset.n() * B2bMma::Shape0::kN
)
);
//
// Main loop
//
OutputOp0 output_op_0(params.output_op_0);
if (cutlass::gemm::threadblock::detail::IsGroupedSwizzle<ThreadblockSwizzle>::value) {
// Wait for all threads to finish their epilogue phases from the previous tile.
__syncthreads();
}
// Construct thread-scoped matrix multiply
B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx, params.problem_size_0.n());
typename B2bMma::FragmentC0 src_accum;
typename B2bMma::FragmentC1 accumulators;
src_accum.clear();
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
b2bMma(gemm_k_iterations_0, accumulators, iterator_A0, iterator_B0,
iterator_Scale0, iterator_Bias0, iterator_B1, src_accum, output_op_0);
//
// Epilogue
//
OutputOp1 output_op_1(params.output_op_1);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * B2bMma::Shape1::kM,
threadblock_tile_offset.n() * B2bMma::Shape1::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C1 = static_cast<ElementC *>(params.ref_C1.data());
ElementC *ptr_D1 = static_cast<ElementC *>(params.ref_D1.data());
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op_1.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C1 += threadblock_tile_offset.k() * params.batch_stride_C1;
ptr_D1 += threadblock_tile_offset.k() * params.batch_stride_D1;
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C1(
params.params_C1,
ptr_C1,
params.problem_size_1.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D1(
params.params_D1,
ptr_D1,
params.problem_size_1.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C1 = iterator_D1;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op_1, iterator_D1, accumulators, iterator_C1);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
__threadfence();
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| examples/13_two_tensor_op_fusion/kernel/b2b_gemm.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/b2b_gemm.h",
"repo_id": "examples",
"token_count": 12865
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run CUTLASS's convolution kernels,
based on the Implicit GEMM algorithm, that use the Tensor Cores
on an NVIDIA Ampere GPU.
Writing a single high-performance convolution kernel is hard enough,
let alone writing kernels that perform well for multiple problem sizes
and use good software abstractions.
CUTLASS provides simplified abstractions
to compose multiple sections of a convolution kernel.
When used properly, the kernels can reach peak GPU performance.
CUTLASS divides a kernel into hierarchical composable sections
for each level of the GPU hardware hierarchy:
thread, warp, and threadblock.
Each section computes on its own tile shape,
with each higher level's tile shape
being composed from lower-level tile shapes.
Multiple thread tiles (the tile shape each thread computes)
can be used to form warp tiles (the tile shape each warp computes),
and multiple warp tiles can be used to compute threadblock tiles
(the tile shape computed by a threadblock).
In this example, we split variable initialization into two parts.
1. Setting up data properties: describes how tensors are laid out in memory
and how the kernel can view them (logical to physical mapping)
2. Setting up computation properties: describes how the above tensors
will be used to compute the output of convolution
We begin by setting up the data types
of all the input and output elements of a convolution.
A convolution computes
C = alpha * Conv2dFprop(A, B) + beta * C,
so we set up data types for the input tensor A,
weights tensor B, output tensor C,
and the scaling factors alpha and beta.
CUTLASS divides the convolution into two parts:
the "mainloop" that computes X = Conv2dFprop(A, B),
and the "epilogue" that computes C = alpha * X + beta * C.
The epilogue is an element-wise operation on X and C.
In this case, it is a linear combination,
but other epilogues are possible.
In this example, we want
* the scaling factors alpha and beta to be float,
* the elements of A and B to be cutlass::half_t
(a 16-bit floating-point type),
* the elements of C to be float, and
* intermediate sums to be accumulated in float.
We convey this to the CUTLASS kernel
by setting the following template parameters.
* alpha and beta: ElementComputeEpilogue = float
* Elements of input tensor A: ElementInputA = cutlass::half_t
* Elements of input tensor B: ElementInputB = cutlass::half_t
* Elements of output tensor C: ElementOutput = float
* Accumulation type: ElementAccumulator = float
Next, we describe the layout of the input and output tensors.
We convey this to the CUTLASS kernel
by setting the following template parameters.
* Layout of input tensor A: LayoutInputA = TensorNHWC
* Layout of input tensor B: LayoutInputB = TensorNHWC
* Layout of output tensor C: LayoutOutput = TensorNHWC
After that, we set up rules to compute the epilogue.
The epilogue in this case is a simple linear combination
C = alpha * X + beta * C.
Thus, we set the kernel's template parameter EpilogueOp
to LinearCombination. LinearCombination itself
has template parameters:
* the element type of the output tensor (ElementOutput),
* the number of elements per vector memory access (8),
* the data type of the accumulator (ElementAccumulator),
* and the data type used to compute the linear combination
(ElementComputeEpilogue).
We then define the tile shapes
that each level of the computation uses.
We define these as types that encode the tile shapes
as compile-time integer values.
Each shape expresses the dimensions M x N x K.
Here, the letters refer to the dimensions
of a matrix-matrix multiply.
* ThreadblockShape defines the threadblock tile shape
as 128 x 128 x 64.
* WarpShape defines the warp tile shape as 64 x 64 x 64.
* InstructionShape defines the MMA
(matrix multiply-accumulate) operation shape
as 16 x 8 x 16.
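With these shapes, each threadblock tile is covered by
(128 / 64) x (128 / 64) = 4 warps, that is, 128 threads.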
These types become template arguments
of the kernel properties type
cutlass::conv::kernel::DefaultConv2dFprop.
The kernel uses these shapes to deduce
the number of threads needed per threadblock,
the required amount of shared memory,
the internal layouts needed to access
shared memory without bank conflicts,
and many other properties that the kernel needs
for good performance.
CUTLASS deduces all these properties automatically,
so that users don't have to.
DefaultConv2dFprop accepts other template parameters
that describe things like the target CUDA SM architecture.
CUTLASS also supports multiple MMA pipelines in a threadblock.
An MMA pipeline constitutes the whole process
of loading input data from global memory to shared memory,
loading data from shared memory to registers,
doing matrix multiplication,
and storing the result to global memory.
The below flow sequence shows a typical MMA multistage pipeline
(see include/cutlass/conv/threadblock/implicit_gemm_multistage.h).
tensor in global memory
--cp_async-->
tile in shared memory
--smem loads-->
registers
--mma-->
registers
--global stores-->
output to global memory
On NVIDIA Ampere, the kernel uses `cp_async`
to build a multistage software pipeline.
This helps it better hide latency.
At this point, we can define the actual CUTLASS kernel type
as the alias ImplicitGemm, a specialization of
cutlass::conv::device::ImplicitGemmConvolution.
The latter accepts the kernel properties type alias
Conv2dFpropKernel as its one template argument.
This example then sets up a test problem
and arguments to the kernel.
We use CUTLASS utilities to allocate
the input and output tensors
and fill them with sample input data.
We then create the kernel arguments
as an instance of ImplicitGemm::Arguments.
The arguments include
the problem size (N = 1, H = 64, W = 64, C = 128),
filter size (K = 64, R = 3, S = 3, C = 128),
padding, strides, dilation, tensors, alpha, beta,
and the split k-dimension factor.
We also query CUTLASS if the kernel we instantiated
requires any memory for scratch space.
If yes, we reserve scratch space and pass it along
with other arguments to initialize the CUTLASS kernel.
After launching the CUTLASS kernel, this example runs
a reference convolution kernel (from CUTLASS utilities)
to check correctness.
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// Data types for input and output tensors
// and computation between elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// Whether to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// SM architecture number
using SmArch = cutlass::arch::Sm80;
// Threadblock tile shape
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
// Warp tile shape
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
// MMA (Tensor Core instruction, in this case) tile shape
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
// How the kernel schedules threadblocks
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipeline stages to use
constexpr int NumStages = 3;
// Which iterator algorithm to use: Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// Whether the output is packed or strided
// Use kStrided if the output is strided
static cutlass::conv::StrideSupport const OutputStride = cutlass::conv::StrideSupport::kUnity;
// The epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
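// (For ElementOutput = float, 128 / cutlass::sizeof_bits<float>::value = 128 / 32 = 4
// elements per vectorized memory access.)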
// Kernel properties type
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm,
OutputStride
>::Kernel;
// Type of the actual kernel
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(false),
measure_performance(true),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify that the problem size is compatible with CUTLASS's convolution implementation
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
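    //
    // For example (illustrative), the default C = 32 and K = 32 satisfy this constraint,
    // whereas C = 20 would be rejected: 128 bits / 16 bits per cutlass::half_t = 8 elements.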
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Update input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parse command-line arguments
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Print an explanation of the command-line arguments
std::ostream & print_usage(std::ostream &out) const {
out << "16_ampere_tensorop_conv2dfprop example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types\n"
<< " to compute forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/16_ampere_tensorop_conv2dfprop/16_ampere_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/16_ampere_tensorop_conv2dfprop/16_ampere_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
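  ///
  /// Example (illustrative): with H = W = 64, a 3x3 filter, padding of 1 on each side, and
  /// unit stride, P = (64 + 1 + 1 - 3) / 1 + 1 = 64 and Q = 64, so the spatial extent is preserved.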
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in Gflop/s
///
  /// Gflop/s stands for billions (10^9) of
  /// floating-point operations per second.
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
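    // Example (illustrative): for an output of size 1x56x56x256 and a 1x1x64 filter (R = S = 1,
    // C = 64), fmas = (1 * 56 * 56 * 256) * 64 = 51,380,224 multiply-adds, or ~0.103 GFLOP of
    // total work.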
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream& print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniformly distributed random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniformly distributed random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with uniformly distributed random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
  // Split the K dimension into a single partition (no split-K slicing)
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on host...\n";
// Compute with reference implementation
cutlass::reference::host::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator
>(
problem_size,
tensor_a.host_ref(),
tensor_b.host_ref(),
tensor_c.host_ref(),
tensor_ref_d.host_ref(),
options.alpha,
options.beta
);
// Check if CUTLASS kernel and reference kernel produced the same output
tensor_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "16_ampere_workspace_conv2dfprop_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device.
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime.
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
      std::cerr << "cudaEventElapsedTime() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average run time and floating-point throughput (Gflop/s).
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {1, 32, 64, 128, 256, 512};
struct Benchmark {
int h, w, c, k, r, s;
} layers[] = {
{56, 56, 64, 256, 1, 1},
{56, 56, 64, 64, 1, 1},
{56, 56, 64, 64, 3, 3},
{56, 56, 256, 64, 1, 1},
{56, 56, 256, 512, 1, 1},
{56, 56, 256, 128, 1, 1},
{28, 28, 128, 128, 3, 3},
{28, 28, 128, 512, 1, 1},
{28, 28, 512, 128, 1, 1},
{28, 28, 512, 1024, 1, 1},
{28, 28, 512, 256, 1, 1},
{14, 14, 256, 256, 3, 3},
{14, 14, 256, 1024, 1, 1},
{14, 14, 1024, 256, 1, 1},
{14, 14, 1024, 2048, 1, 1},
{14, 14, 1024, 512, 1, 1},
{7, 7, 512, 512, 3, 3},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
| examples/16_ampere_tensorop_conv2dfprop/ampere_tensorop_conv2dfprop.cu/0 | {
"file_path": "examples/16_ampere_tensorop_conv2dfprop/ampere_tensorop_conv2dfprop.cu",
"repo_id": "examples",
"token_count": 10130
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief GEMM Grouped Example.
This workload computes a batch of GEMM operations with distinct problem sizes. Pointers to matrices
    in Global Memory are passed to the kernel in arrays (also held in Global Memory). Similarly,
leading dimensions and problem sizes are stored in arrays in GMEM.
This differs from "Batched Array" GEMM because the size of each GEMM problem in the Grouped GEMM
concept may be distinct.
This benchmark program initializes a workspace with random problem sizes for a given number of
groups. Command line options enable overriding M, N, and/or K dimensions with uniform values to
model problems more similar to the traditional batched GEMM.
    Additionally, problem sizes are collected and binned so that the same workload can also be computed as a
    series of conventional batched GEMMs (setup for the batched path is not timed). This demonstrates the performance
enhancement achieved by implementing a specialized grouped GEMM kernel.
Examples:
# Runs a grouped GEMM with 100 random problem sizes
$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100
# Runs a grouped GEMM with 100 random problem sizes (with GEMM-K dimension equal to 1024)
$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --k=1024 --verbose=true
# Runs a grouped GEMM that is equivalent to a batched GEMM
$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --m=2048 --n=1024 --k=1024 --verbose=true
# Execute Grouped GEMM and profile with NSight
$ nv-nsight-cu-cli ./examples/24_gemm_grouped/24_gemm_grouped --m=256 --n=256 --k=256 --verbose=true \
--iterations=1 --reference-check=false
*/
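//
// A minimal host-side sketch (illustrative only, not part of this example's build) of how
// grouped GEMM inputs are laid out: one entry per group for the problem size, the operand
// pointers, and the leading dimensions, with every array eventually copied to device memory.
// The names below are assumptions for illustration; the real setup is performed by BaseTestbed.
//
//   std::vector<cutlass::gemm::GemmCoord> problem_sizes = {{128, 256, 64}, {96, 96, 96}};
//   std::vector<cutlass::half_t const *> ptr_A(problem_sizes.size());
//   std::vector<cutlass::half_t const *> ptr_B(problem_sizes.size());
//   std::vector<cutlass::half_t *>       ptr_C(problem_sizes.size());
//   std::vector<int64_t> lda(problem_sizes.size()), ldb(problem_sizes.size()), ldc(problem_sizes.size());
//   // ... fill the pointer / leading-dimension arrays, copy each one to device memory,
//   //     and pass the device pointers to GemmGrouped::Arguments (see TestbedGrouped::profile).
//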
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <chrono>
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <map>
#include <unordered_map>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double initialization_time_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double initialization_time_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), initialization_time_ms(initialization_time_ms), gflops(gflops),
status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Hash function for cutlass::gemm::GemmCoord
struct HashGemmCoord {
size_t operator()(cutlass::gemm::GemmCoord const &problem) const {
std::hash<int> hasher;
return (hasher(problem.m() * 3)) ^ (hasher(1 + problem.n() * 5)) ^ (hasher(2 + problem.k() * 7));
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
bool error;
bool reference_check;
bool profile_initialization;
bool sort_problems;
std::vector<cutlass::gemm::GemmCoord> problem_sizes;
// problem size bins
std::unordered_map<
cutlass::gemm::GemmCoord,
std::vector<int32_t>,
HashGemmCoord> problem_bins;
int alignment;
int problem_count;
int iterations;
int cuda_streams;
bool verbose;
float alpha;
float beta;
std::string benchmark_path;
std::string output_tag;
std::ofstream output_file;
using GroupScheduleMode = cutlass::gemm::kernel::GroupScheduleMode;
std::vector<GroupScheduleMode> scheduler_modes;
std::unordered_map<std::string, GroupScheduleMode>
str_to_scheduler_mode = {
{"kDeviceOnly", GroupScheduleMode::kDeviceOnly},
{"kHostPrecompute", GroupScheduleMode::kHostPrecompute}
};
struct GroupScheduleModeHash {
size_t operator()(GroupScheduleMode m) const {
return static_cast<size_t>(m);
}
};
std::unordered_map<GroupScheduleMode, std::string, GroupScheduleModeHash>
scheduler_mode_to_str = {
{GroupScheduleMode::kDeviceOnly, "kDeviceOnly"},
{GroupScheduleMode::kHostPrecompute, "kHostPrecompute"}
};
std::vector<GroupScheduleMode> all_scheduler_modes = {GroupScheduleMode::kDeviceOnly, GroupScheduleMode::kHostPrecompute};
//
// Methods
//
Options():
help(false),
error(false),
alignment(8),
reference_check(true),
profile_initialization(false),
sort_problems(false),
problem_count(15),
iterations(20),
cuda_streams(0),
verbose(false),
alpha(1),
beta(),
scheduler_modes({GroupScheduleMode::kDeviceOnly})
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("alignment", alignment, 8);
cmd.get_cmd_line_argument("groups", problem_count, 15);
cmd.get_cmd_line_argument("alpha", alpha, 1.0f);
cmd.get_cmd_line_argument("beta", beta, 0.0f);
cmd.get_cmd_line_argument("iterations", iterations, 20);
cmd.get_cmd_line_argument("streams", cuda_streams, 0);
cmd.get_cmd_line_argument("verbose", verbose, false);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
cmd.get_cmd_line_argument("profile-initialization", profile_initialization, false);
cmd.get_cmd_line_argument("sort-problems", sort_problems, false);
cmd.get_cmd_line_argument("benchmark", benchmark_path);
std::vector<std::string> scheduler_mode_strs;
cmd.get_cmd_line_arguments("scheduler-modes", scheduler_mode_strs);
if (!scheduler_mode_strs.empty()) {
scheduler_modes.clear();
if (scheduler_mode_strs.size() == 1 && scheduler_mode_strs[0] == "all") {
scheduler_modes = all_scheduler_modes;
} else {
for (std::string precomp_str : scheduler_mode_strs) {
auto it = str_to_scheduler_mode.find(precomp_str);
if (it != str_to_scheduler_mode.end()) {
scheduler_modes.push_back(it->second);
} else if (precomp_str == "all") {
std::cerr << "Flag --scheduler-modes=all must not contain other scheduler modes in list." << std::endl;
error = true;
return;
} else {
std::cerr << "Unrecognized scheduler mode '" << precomp_str << "'" << std::endl;
error = true;
return;
}
}
}
}
std::string output_path;
cmd.get_cmd_line_argument("tag", output_tag);
cmd.get_cmd_line_argument("output_file", output_path);
if (!output_path.empty()) {
std::ios_base::openmode open_mode = std::ios_base::out;
std::ifstream input_file(output_path.c_str());
if (input_file.good()) {
open_mode = std::ios_base::app;
input_file.close();
}
output_file.open(output_path.c_str(), open_mode);
if (output_file.good() && open_mode != std::ios_base::app) {
output_file << "Tag,Provider,Kind,Groups,Runtime,GFLOPs\n";
}
}
// Decide how to initialize the problems
if (!benchmark_path.empty()) {
if (!benchmark_problems()) {
error = true;
problem_sizes.clear();
return;
}
}
else {
randomize_problems(cmd);
}
// Post-process the problem sizes
bin_problems();
}
void randomize_problems(cutlass::CommandLine &cmd) {
//
// For now, randomly choose the problem sizes.
//
int cmd_line_m = -1;
int cmd_line_n = -1;
int cmd_line_k = -1;
cmd.get_cmd_line_argument("m", cmd_line_m);
cmd.get_cmd_line_argument("n", cmd_line_n);
cmd.get_cmd_line_argument("k", cmd_line_k);
problem_sizes.reserve(problem_count);
for (int i = 0; i < problem_count; ++i) {
int m = cmd_line_m;
int n = cmd_line_n;
int k = cmd_line_k;
if (m < 1) {
m = alignment * ((rand() % 256) + 1);
}
if (n < 1) {
n = alignment * ((rand() % 256) + 1);
}
if (k < 1) {
k = alignment * ((rand() % 256) + 1);
}
cutlass::gemm::GemmCoord problem(m, n, k);
problem_sizes.push_back(problem);
}
}
/// Load a benchmark
bool benchmark_problems() {
std::ifstream file(benchmark_path);
if (!file.good()) {
return false;
}
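    // Each line of the benchmark file is expected to hold "<group index> <M>x<N>x<K>",
    // e.g. "0 1024x256x520"; extents are rounded up to the nearest multiple of `alignment`.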
while (file.good()) {
int idx = -1;
std::string extent_str;
file >> idx >> extent_str;
if (idx < 0 || extent_str.empty()) {
break;
}
cutlass::gemm::GemmCoord extent;
std::vector<std::string> tokens;
cutlass::CommandLine::tokenize(tokens, extent_str, 'x');
for (int i = 0; i < int(tokens.size()); ++i) {
int x = std::atoi(tokens.at(i).c_str());
// round up
if (x % alignment) {
x += (alignment - (x % alignment));
}
extent.at(i) = x;
}
if (extent.product()) {
problem_sizes.push_back(extent);
}
}
return true;
}
/// Post processes the problems
void bin_problems() {
problem_bins.clear();
problem_count = int(problem_sizes.size());
//
    // Bin the problem sizes by extent. This is *NOT* necessary to run the CUTLASS grouped
    // kernel, but it enables executing the same workload as a series of conventional batched GEMMs.
//
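    // For example, problem sizes {256x256x128, 512x512x64, 256x256x128} produce two bins:
    //   256x256x128 -> group indices {0, 2}
    //   512x512x64  -> group index  {1}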
for (int i = 0; i < int(problem_sizes.size()); ++i) {
auto it = problem_bins.find(problem_sizes.at(i));
if (it == problem_bins.end()) {
problem_bins.insert({problem_sizes.at(i), std::vector<int32_t>({i}) });
}
else {
it->second.push_back(i);
}
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "24_gemm_grouped\n\n"
<< " This example profiles the performance of a 'grouped' GEMM kernel. This is similar to batched GEMM\n"
<< " in that multiple, independent GEMMs are computed by one grid launch. It differs in that each\n"
<< " 'group' may compute a unique problem size. Problem sizes and pointers to matrices are both stored\n"
<< " in device Global Memory and loaded by the kernel.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --benchmark=<str> Executes a benchmark problem size.\n"
<< " --output_file=<str> Path to a CSV file to output results. If it exists already, results are appended.\n"
<< " --tag=<str> String tag to prepend to the CSV file.\n"
<< " --groups=<int> Number of individual GEMM problems (default: --groups=15)\n"
<< " --m=<int> Sets the M dimension for all groups. Otherwise, it is selected randomly\n"
<< " --n=<int> Sets the N dimension for all groups. Otherwise, it is selected randomly\n"
<< " --k=<int> Sets the K dimension for all groups. Otherwise, it is selected randomly\n"
<< " --alpha=<f32> Epilogue scalar alpha (real part)\n"
<< " --beta=<f32> Epilogue scalar beta (real part)\n"
<< " --scheduler-modes=<str> List of scheduler modes to be profile for grouped GEMM scheduler (default: --scheduler_modes=kDeviceOnly)\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --reference-check=<bool> If true, performs reference check.\n"
<< " --verbose=<bool> If true, prints problem sizes and batching structure.\n"
<< " --profile-initialization=<bool> If true, profiles the device-level kernel's initialization.\n"
<< " --sort-problems=<bool> If true, sorts problem sizes in descending order of GEMM-K dimension.\n";
out << "\n\nExamples:\n\n"
<< "# Runs a grouped GEMM with 100 random problem sizes\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100\n\n"
<< "# Runs a grouped GEMM with 100 random problem sizes (with GEMM-K dimension equal to 1024)\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --k=1024 --verbose=true\n\n"
<< "# Runs a grouped GEMM that is equivalent to a batched GEMM\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --m=2048 --n=1024 --k=1024 --verbose=true\n\n"
<< "# Runs a grouped GEMM with each different scheduler mode\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --scheduler-modes=all\n\n"
<< "# Runs a grouped GEMM with each different scheduler mode and profiles host-side initialization time\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --scheduler-modes=all --profile-initialization=true\n\n"
<< "# Runs a grouped GEMM problem given an externally supplied benchmark file. This is a text file in which\n"
<< "# Each line contains a unique group index and an MxNxK triple indicating problemsize.\n"
<< "#\n"
<< "# For example, assume the following are the contents of 'problems.txt'\n"
<< "#\n"
<< "# 0 1024x256x520\n"
<< "# 1 520x264x1024\n"
<< "# 2 96x48x1024\n"
<< "#\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --benchmark=problems.txt\n\n"
<< "# Execute Grouped GEMM and profile with NSight\n"
<< "$ nv-nsight-cu-cli ./examples/24_gemm_grouped/24_gemm_grouped --m=256 --n=256 --k=256 --verbose=true --iterations=1 --reference-check=false\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = int64_t();
for (auto const & problem : problem_sizes) {
fmas += problem.product();
}
// Two flops per multiply-add
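    // For example, 15 groups of 1024x1024x1024 contribute ~1.61e10 multiply-adds, so a
    // 10 ms pass over all groups reports roughly 3.2e3 GFLOP/s.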
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
class BaseTestbed {
public:
//
// Type definitions
//
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp;
using ElementCompute = typename EpilogueOutputOp::ElementCompute;
using LayoutA = typename Gemm::LayoutA;
using LayoutB = typename Gemm::LayoutB;
using LayoutC = typename Gemm::LayoutC;
using MatrixCoord = typename LayoutC::TensorCoord;
//
// Data members
//
Options & options;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint32_t seed;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device;
std::vector<int64_t> offset_A;
std::vector<int64_t> offset_B;
std::vector<int64_t> offset_C;
std::vector<int64_t> offset_D;
std::vector<int64_t> lda_host;
std::vector<int64_t> ldb_host;
std::vector<int64_t> ldc_host;
std::vector<int64_t> ldd_host;
cutlass::DeviceAllocation<int64_t> lda;
cutlass::DeviceAllocation<int64_t> ldb;
cutlass::DeviceAllocation<int64_t> ldc;
cutlass::DeviceAllocation<int64_t> ldd;
cutlass::DeviceAllocation<ElementA> block_A;
cutlass::DeviceAllocation<ElementB> block_B;
cutlass::DeviceAllocation<ElementC> block_C;
cutlass::DeviceAllocation<ElementC> block_D;
cutlass::DeviceAllocation<ElementA *> ptr_A;
cutlass::DeviceAllocation<ElementB *> ptr_B;
cutlass::DeviceAllocation<ElementC *> ptr_C;
cutlass::DeviceAllocation<ElementC *> ptr_D;
BaseTestbed(
Options &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
options(options_), init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
int problem_count() const {
return options.problem_count;
}
/// Helper to initialize a tensor view
template <typename Element>
void initialize_tensor(
Element *ptr,
size_t capacity,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
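      // Narrow the random range for low-precision inputs/outputs so accumulation in the
      // reference check stays exactly representable and the comparison can be bitwise.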
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope_max = 5;
scope_min = -5;
}
else {
scope_max = 8;
scope_min = -8;
}
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
ptr, capacity, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::device::BlockFillRandomGaussian(
ptr, capacity, seed, Element(), Element(0.5f));
}
else if (dist_kind == cutlass::Distribution::Sequential) {
// Fill with increasing elements
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(1), Element());
}
else {
// Fill with all 1s
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(), Element(1));
}
}
/// Allocates device-side data
void allocate() {
int64_t total_elements_A = 0;
int64_t total_elements_B = 0;
int64_t total_elements_C = 0;
int64_t total_elements_D = 0;
lda_host.resize(problem_count());
ldb_host.resize(problem_count());
ldc_host.resize(problem_count());
ldd_host.resize(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
auto problem = options.problem_sizes.at(i);
lda_host.at(i) = LayoutA::packed({problem.m(), problem.k()}).stride(0);
ldb_host.at(i) = LayoutB::packed({problem.k(), problem.n()}).stride(0);
ldc_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0);
ldd_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0);
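      // packed(extent).stride(0) is the leading dimension of a densely packed matrix;
      // with the ColumnMajor layouts instantiated in main() this is simply the row count.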
offset_A.push_back(total_elements_A);
offset_B.push_back(total_elements_B);
offset_C.push_back(total_elements_C);
offset_D.push_back(total_elements_D);
int64_t elements_A = problem.m() * problem.k();
int64_t elements_B = problem.k() * problem.n();
int64_t elements_C = problem.m() * problem.n();
int64_t elements_D = problem.m() * problem.n();
total_elements_A += elements_A;
total_elements_B += elements_B;
total_elements_C += elements_C;
total_elements_D += elements_D;
}
lda.reset(problem_count());
ldb.reset(problem_count());
ldc.reset(problem_count());
ldd.reset(problem_count());
block_A.reset(total_elements_A);
block_B.reset(total_elements_B);
block_C.reset(total_elements_C);
block_D.reset(total_elements_D);
}
/// Initializes device-side data
void initialize() {
problem_sizes_device.reset(problem_count());
problem_sizes_device.copy_from_host(options.problem_sizes.data());
lda.copy_from_host(lda_host.data());
ldb.copy_from_host(ldb_host.data());
ldc.copy_from_host(ldc_host.data());
ldd.copy_from_host(ldd_host.data());
//
// Assign pointers
//
std::vector<ElementA *> ptr_A_host(problem_count());
std::vector<ElementB *> ptr_B_host(problem_count());
std::vector<ElementC *> ptr_C_host(problem_count());
std::vector<ElementC *> ptr_D_host(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
ptr_A_host.at(i) = block_A.get() + offset_A.at(i);
ptr_B_host.at(i) = block_B.get() + offset_B.at(i);
ptr_C_host.at(i) = block_C.get() + offset_C.at(i);
ptr_D_host.at(i) = block_D.get() + offset_D.at(i);
}
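    // Every group's operands live inside the four large block allocations; a group's
    // pointer is just the block base plus the per-group element offset computed in allocate().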
ptr_A.reset(problem_count());
ptr_A.copy_from_host(ptr_A_host.data());
ptr_B.reset(problem_count());
ptr_B.copy_from_host(ptr_B_host.data());
ptr_C.reset(problem_count());
ptr_C.copy_from_host(ptr_C_host.data());
ptr_D.reset(problem_count());
ptr_D.copy_from_host(ptr_D_host.data());
//
// Initialize the problems of the workspace
//
initialize_tensor(block_A.get(), block_A.size(), init_A, seed * 2021);
initialize_tensor(block_B.get(), block_B.size(), init_B, seed * 2022);
initialize_tensor(block_C.get(), block_C.size(), init_C, seed * 2023);
cutlass::reference::device::BlockFillSequential(
block_D.get(), block_D.size(), ElementC(), ElementC());
}
  /// Verifies the result against a reference GEMM computed on the device
bool verify() {
bool passed = true;
for (int32_t i = 0; i < problem_count(); ++i) {
cutlass::gemm::GemmCoord problem = options.problem_sizes.at(i);
LayoutA layout_A(lda_host.at(i));
LayoutB layout_B(ldb_host.at(i));
LayoutC layout_C(ldc_host.at(i));
LayoutC layout_D(ldd_host.at(i));
MatrixCoord extent_A{problem.m(), problem.k()};
MatrixCoord extent_B{problem.k(), problem.n()};
MatrixCoord extent_C{problem.m(), problem.n()};
cutlass::TensorView<ElementA, LayoutA> view_A(block_A.get() + offset_A.at(i), layout_A, extent_A);
cutlass::TensorView<ElementB, LayoutB> view_B(block_B.get() + offset_B.at(i), layout_B, extent_B);
cutlass::TensorView<ElementC, LayoutC> view_C(block_C.get() + offset_C.at(i), layout_C, extent_C);
cutlass::DeviceAllocation<ElementC> block_Ref(layout_D.capacity(extent_C));
cutlass::TensorView<ElementC, LayoutC> view_Ref_device(block_Ref.get(), layout_D, extent_C);
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute, ElementAccumulator
>(
problem,
options.alpha,
view_A,
Gemm::kTransformA,
view_B,
Gemm::kTransformB,
options.beta,
view_C,
view_Ref_device,
ElementAccumulator(0)
);
// Copy to host memory
std::vector<ElementC> matrix_D(layout_D.capacity(extent_C));
std::vector<ElementC> matrix_Ref(layout_D.capacity(extent_C));
cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size());
cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref.get(), matrix_D.size());
cutlass::TensorView<ElementC, LayoutC> view_D( matrix_D.data(), layout_D, extent_C);
cutlass::TensorView<ElementC, LayoutC> view_Ref(matrix_Ref.data(), layout_D, extent_C);
// Reference check
passed = cutlass::reference::host::TensorEquals(view_D, view_Ref);
if (!passed) {
std::cerr << "\n***\nError - problem " << i << " failed the QA check\n***\n" << std::endl;
return passed;
}
}
return passed;
}
};
template <typename Gemm>
class TestbedBatched : BaseTestbed<Gemm> {
public:
TestbedBatched(
Options &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
): BaseTestbed<Gemm>(options_, init_A_, init_B_, init_C_, seed_) {}
void print_problem_sizes() {
std::cout << std::endl;
size_t bin_idx = 0;
size_t problem_count_check = 0;
std::cout << "Conventionally executed as " << this->options.problem_bins.size() << " batched GEMMs:\n";
for (auto const & bin : this->options.problem_bins) {
std::cout << " [" << bin_idx << "]: "
<< bin.first.m() << "-by-" << bin.first.n() << "-by-" << bin.first.k()
<< ", batch count: " << bin.second.size() << "\n";
++bin_idx;
problem_count_check += bin.second.size();
}
if (problem_count_check != size_t(this->problem_count())) {
std::cout << "\n***\nERROR in BINNING LOGIC!\n***\n" << std::endl;
}
std::cout << std::endl;
}
/// Executes a batched kernel and measures runtime
Result profile() {
std::cout << "Batched GEMM:\n"
<< "====================================================" << std::endl;
Result result;
result.passed = false;
// Initialize the problem
this->allocate();
this->initialize();
if (this->options.verbose) {
print_problem_sizes();
}
//
// Prepare batched GEMM environment
//
int32_t effective_streams = (this->options.cuda_streams ? this->options.cuda_streams : 1);
// Array of leading dimensions used by batched GEMM calls
std::vector<cutlass::gemm::GemmCoord> bin_problem_sizes;
std::vector<int32_t> bin_count;
std::vector<int32_t> bin_ldm_A;
std::vector<int32_t> bin_ldm_B;
std::vector<int32_t> bin_ldm_C;
std::vector<int32_t> bin_start;
std::vector<void const *> ptr_A_batched_host;
std::vector<void const *> ptr_B_batched_host;
std::vector<void *> ptr_C_batched_host;
for (auto const & bin : this->options.problem_bins) {
int first_idx = bin.second.front();
bin_problem_sizes.push_back(this->options.problem_sizes.at(first_idx));
bin_count.push_back(int32_t(bin.second.size()));
bin_ldm_A.push_back(static_cast<int32_t>(this->lda_host.at(first_idx)));
bin_ldm_B.push_back(static_cast<int32_t>(this->ldb_host.at(first_idx)));
bin_ldm_C.push_back(static_cast<int32_t>(this->ldc_host.at(first_idx)));
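      // Pad so each bin's pointer sub-array starts at an even index (16 bytes for 8-byte
      // pointers). This is presumed to be an alignment preference of the batched-array GEMM;
      // the padding entries are never dereferenced.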
if (ptr_A_batched_host.size() % 2) {
ptr_A_batched_host.push_back(nullptr);
ptr_B_batched_host.push_back(nullptr);
ptr_C_batched_host.push_back(nullptr);
}
bin_start.push_back(int32_t(ptr_A_batched_host.size()));
for (int idx : bin.second) {
if (bin_problem_sizes.back() != this->options.problem_sizes.at(idx)) {
std::cerr << "Error - failed to group problems.\n";
return result;
}
if (bin_ldm_A.back() != this->lda_host.at(idx)) {
std::cerr << "Error - failed to group problems.\n";
return result;
}
if (bin_ldm_B.back() != this->ldb_host.at(idx)) {
std::cerr << "Error - failed to group problems.\n";
return result;
}
if (bin_ldm_C.back() != this->ldc_host.at(idx)) {
std::cerr << "Error - failed to group problems.\n";
return result;
}
ptr_A_batched_host.push_back(this->block_A.get() + this->offset_A.at(idx));
ptr_B_batched_host.push_back(this->block_B.get() + this->offset_B.at(idx));
ptr_C_batched_host.push_back(this->block_D.get() + this->offset_C.at(idx));
}
}
// Array of GMEM pointers used by batched array GEMM calls
cutlass::DeviceAllocation<void const *> ptr_A_batched;
cutlass::DeviceAllocation<void const *> ptr_B_batched;
cutlass::DeviceAllocation<void *> ptr_C_batched;
ptr_A_batched.reset(ptr_A_batched_host.size());
ptr_B_batched.reset(ptr_A_batched_host.size());
ptr_C_batched.reset(ptr_A_batched_host.size());
ptr_A_batched.copy_from_host(ptr_A_batched_host.data());
ptr_B_batched.copy_from_host(ptr_B_batched_host.data());
ptr_C_batched.copy_from_host(ptr_C_batched_host.data());
//
// Create CUDA streams to maximize concurrency of batched-array GEMM kernels
//
std::vector<cudaStream_t> cuda_streams;
if (this->options.cuda_streams) {
for (int i = 0; i < this->options.cuda_streams; ++i) {
cudaStream_t stream;
result.error = cudaStreamCreate(&stream);
if (result.error != cudaSuccess) {
std::cerr << "Failed to create CUDA stream." << std::endl;
return result;
}
cuda_streams.push_back(stream);
}
}
else {
cuda_streams.push_back(nullptr);
}
    //
    // Warmup run: use 'D' for the in/out workspace and execute each bin once before timing
    //
this->block_D.copy_from_device(this->block_C.get());
for (int bin_idx = 0; bin_idx < int32_t(bin_problem_sizes.size()); ++bin_idx) {
cutlass::gemm::GemmCoord const & problem = bin_problem_sizes[bin_idx];
int32_t batch_count = bin_count[bin_idx];
int32_t bin_start_idx = bin_start[bin_idx];
int32_t lda = bin_ldm_A[bin_idx];
int32_t ldb = bin_ldm_B[bin_idx];
int32_t ldc = bin_ldm_C[bin_idx];
void const ** ptr_A_array = ptr_A_batched.get() + bin_start[bin_idx];
void const ** ptr_B_array = ptr_B_batched.get() + bin_start[bin_idx];
void ** ptr_C_array = ptr_C_batched.get() + bin_start[bin_idx];
//
// Initialize the CUTLASS GEMM operator
//
// Configure the GEMM arguments
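      // In kArray mode the A/B/C/D operand fields carry device arrays of per-batch pointers
      // and the four batch strides (the int64_t() zeros below) appear to be unused; a single
      // lda/ldb/ldc applies to every GEMM in the bin.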
typename Gemm::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta);
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kArray,
problem,
batch_count,
epilogue_op,
(void const *)ptr_A_array,
(void const *)ptr_B_array,
(void const *)ptr_C_array,
(void *)ptr_C_array,
int64_t(),
int64_t(),
int64_t(),
int64_t(),
int64_t(lda),
int64_t(ldb),
int64_t(ldc),
int64_t(ldc)
};
Gemm gemm_op;
cutlass::Status status = gemm_op.initialize(arguments);
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
status = gemm_op();
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
}
//
// Wait for completion
//
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
//
// Wait for completion
//
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
// Record an event at the start of a series of GEMM operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
int last_stream_idx = 0;
for (int iter = 0; iter < this->options.iterations; ++iter) {
for (int bin_idx = 0; bin_idx < int32_t(bin_problem_sizes.size()); ++bin_idx) {
cutlass::gemm::GemmCoord const & problem = bin_problem_sizes[bin_idx];
int32_t batch_count = bin_count[bin_idx];
int32_t bin_start_idx = bin_start[bin_idx];
int32_t lda = bin_ldm_A[bin_idx];
int32_t ldb = bin_ldm_B[bin_idx];
int32_t ldc = bin_ldm_C[bin_idx];
void const ** ptr_A_array = ptr_A_batched.get() + bin_start[bin_idx];
void const ** ptr_B_array = ptr_B_batched.get() + bin_start[bin_idx];
void ** ptr_C_array = ptr_C_batched.get() + bin_start[bin_idx];
last_stream_idx = (bin_idx % effective_streams);
//
// Initialize the CUTLASS GEMM operator
//
// Configure the GEMM arguments
typename Gemm::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta);
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kArray,
problem,
batch_count,
epilogue_op,
(void const *)ptr_A_array,
(void const *)ptr_B_array,
(void const *)ptr_C_array,
(void *)ptr_C_array,
int64_t(),
int64_t(),
int64_t(),
int64_t(),
int64_t(lda),
int64_t(ldb),
int64_t(ldc),
int64_t(ldc)
};
Gemm gemm_op;
cutlass::Status status = gemm_op.initialize(arguments);
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
status = gemm_op(cuda_streams[last_stream_idx]);
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
}
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Wait for work to be completed
//
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(this->options.iterations);
result.gflops = this->options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
for (auto stream : cuda_streams) {
if (stream) {
(void)cudaStreamDestroy(stream);
}
}
std::cout << " " << this->options.problem_bins.size() << " batched GEMMs launched" << std::endl;
std::cout << std::endl;
std::cout << " " << "Batched Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "Batched GFLOPs: " << result.gflops << std::endl;
std::string provider = "CUTLASS";
if (this->options.output_file.good()) {
this->options.output_file << this->options.output_tag << "," << provider << ",batched,"
<< this->options.problem_count << "," << result.runtime_ms << "," << result.gflops << std::endl;
}
result.passed = true;
return result;
}
};
template <typename Gemm_, cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_>
class TestbedGrouped : BaseTestbed<Gemm_> {
public:
TestbedGrouped(
Options &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
): BaseTestbed<Gemm_>(options_, init_A_, init_B_, init_C_, seed_) {}
// Redefine GEMM with different GroupScheduleMode_
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
typename Gemm_::ElementA,
typename Gemm_::LayoutA,
Gemm_::kTransformA,
Gemm_::kAlignmentA,
typename Gemm_::ElementB,
typename Gemm_::LayoutB,
Gemm_::kTransformB,
Gemm_::kAlignmentB,
typename Gemm_::ElementC,
typename Gemm_::LayoutC,
typename Gemm_::ElementAccumulator,
typename Gemm_::OperatorClass,
typename Gemm_::ArchTag,
typename Gemm_::ThreadblockShape,
typename Gemm_::WarpShape,
typename Gemm_::InstructionShape,
typename Gemm_::EpilogueOutputOp,
typename Gemm_::ThreadblockSwizzle,
Gemm_::kStages,
GroupScheduleMode_>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
/// Verbose printing of problem sizes
void print_problem_sizes() {
std::cout << std::endl;
// Print groups
std::cout << this->problem_count() << " groups:\n";
int32_t idx = 0;
int64_t total_tiles = 0;
for (auto const & problem : this->options.problem_sizes) {
int tiles = Gemm::problem_tile_count(problem);
total_tiles += tiles;
std::cout << " [" << idx << "]: "
<< problem.m() << "-by-" << problem.n() << "-by-" << problem.k()
<< " (" << tiles << " threadblock tiles)" << "\n";
++idx;
}
std::cout << std::endl;
}
/// Sort problems in descending order of problem-K dimension
void sort_problems() {
Gemm::sort_problems(this->options.problem_count,
this->options.problem_sizes.data(),
this->lda_host.data(),
this->ldb_host.data(),
this->ldc_host.data(),
this->ldd_host.data(),
this->offset_A.data(),
this->offset_B.data(),
this->offset_C.data(),
this->offset_D.data());
}
/// Executes a grouped kernel and measures runtime
Result profile() {
std::string sched_mode = this->options.scheduler_mode_to_str.find(GroupScheduleMode_)->second;
std::cout << std::endl;
std::cout << "Grouped GEMM (CUTLASS) with mode " << sched_mode << ":\n"
<< "====================================================" << std::endl;
Result result;
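    // Gemm::sufficient() appears to return the number of persistent threadblocks to launch
    // for this device, or zero if the device lacks the resources to run the kernel.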
int threadblock_count = Gemm::sufficient(this->options.problem_sizes.data(), this->options.problem_count);
// Early exit
if (!threadblock_count) {
std::cout << "Active CUDA device lacks hardware resources to run CUTLASS Grouped GEMM kernel." << std::endl;
return result;
}
result.passed = false;
// Initialize the problem
this->allocate();
if (this->options.sort_problems) {
sort_problems();
}
this->initialize();
if (this->options.verbose) {
print_problem_sizes();
}
// Configure the GEMM arguments
typename Gemm::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta);
// Configure GEMM arguments
typename Gemm::Arguments args(
this->problem_sizes_device.get(),
this->problem_count(),
threadblock_count,
epilogue_op,
this->ptr_A.get(),
this->ptr_B.get(),
this->ptr_C.get(),
this->ptr_D.get(),
this->lda.get(),
this->ldb.get(),
this->ldc.get(),
this->ldd.get(),
this->options.problem_sizes.data()
);
// Initialize the GEMM object
Gemm gemm;
size_t workspace_size = gemm.get_workspace_size(args);
cutlass::DeviceAllocation<uint8_t> workspace(workspace_size);
result.status = gemm.initialize(args, workspace.get());
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to initialize CUTLASS Grouped GEMM kernel." << std::endl;
return result;
}
// Run the grouped GEMM object
result.status = gemm.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Grouped GEMM kernel." << std::endl;
return result;
}
// Wait for completion
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
//
// Verify correctness
//
result.passed = true;
if (this->options.reference_check) {
result.passed = this->verify();
}
//
// Warm-up run of the grouped GEMM object
//
result.status = gemm.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Grouped GEMM kernel." << std::endl;
return result;
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
// Record an event at the start of a series of GEMM operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < this->options.iterations; ++iter) {
gemm();
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(this->options.iterations);
result.gflops = this->options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
// Optionally profile initialization
if (this->options.profile_initialization) {
// Warm up
gemm.initialize(args, workspace.get());
auto start_time = std::chrono::high_resolution_clock::now();
for (int32_t i = 0; i < this->options.iterations; ++i) {
gemm.initialize(args, workspace.get());
}
auto end_time = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> duration = end_time - start_time;
duration /= double(this->options.iterations);
result.initialization_time_ms = duration.count();
}
int64_t total_tiles = Gemm::group_tile_count(args);
std::cout << " " << total_tiles << " total threadblock tiles." << std::endl;
std::cout << std::endl;
std::cout << " " << "Grouped Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "Grouped GFLOPs: " << result.gflops << std::endl;
if (this->options.profile_initialization) {
std::cout << " " << "Init Runtime: " << result.initialization_time_ms << " ms" << std::endl;
}
if (this->options.output_file.good()) {
this->options.output_file << this->options.output_tag << ",CUTLASS,grouped-" << sched_mode << ","
<< this->options.problem_count << "," << result.runtime_ms << "," << result.gflops << std::endl;
}
std::cout << "\nPassed\n";
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) {
//
// This example requires an NVIDIA Ampere-architecture GPU.
//
std::cout
<< "CUTLASS's Grouped GEMM example requires a GPU of NVIDIA's Ampere Architecture or "
<< "later (compute capability 80 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return -1;
}
//
// Define the Grouped and Batched GEMM types
//
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
// Gemm operator cutlass_tensorop_f16_s16816gemm_f16_128x128_32x4_nt_align8
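  // Configuration: 128x128x32 threadblock tile, 64x64x32 warp tile, 16x8x16 Tensor Core
  // instruction, f16 operands with f32 accumulation, 8-element aligned epilogue, 4 stages.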
using GemmBatched = cutlass::gemm::device::GemmUniversal<
ElementA, LayoutA,
ElementB, LayoutB,
ElementOutput, LayoutC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
4
>;
// Define a grouped GEMM kernel with all template parameters set except
// for scheduling mode. This will be used as the template for all scheduling
// modes executed.
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementA,
LayoutA,
cutlass::ComplexTransform::kNone,
8,
ElementB,
LayoutB,
cutlass::ComplexTransform::kNone,
8,
ElementOutput, LayoutC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator, ElementAccumulator>,
// NOTE: Threadblock swizzling is currently not supported by CUTLASS's grouped kernels.
// This parameter is passed in at present to match the APIs of other kernels. The parameter
// is unused within the kernel.
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
4>::GemmKernel;
using GemmGrouped = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Profile it
//
TestbedBatched<GemmBatched> testbed_batched(options);
Result result = testbed_batched.profile();
if (result.error) {
return 1;
}
using GroupScheduleMode = cutlass::gemm::kernel::GroupScheduleMode;
for (GroupScheduleMode mode : options.scheduler_modes) {
Result result;
switch (mode) {
case GroupScheduleMode::kDeviceOnly:
{
TestbedGrouped<GemmGrouped, GroupScheduleMode::kDeviceOnly> runner(options);
result = runner.profile();
break;
}
case GroupScheduleMode::kHostPrecompute:
{
TestbedGrouped<GemmGrouped, GroupScheduleMode::kHostPrecompute> runner(options);
result = runner.profile();
break;
}
}
if (result.error != cudaSuccess) {
return 1;
}
// Override verbose flag to avoid printing duplicate information for each scheduling mode
options.verbose = false;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/24_gemm_grouped/gemm_grouped.cu/0 | {
"file_path": "examples/24_gemm_grouped/gemm_grouped.cu",
"repo_id": "examples",
"token_count": 20904
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <float.h>
#include <stdio.h>
#include <cmath>
////////////////////////////////////////////////////////////////////////////////
// Debugging functions
////////////////////////////////////////////////////////////////////////////////
// NaN & Inf detection
#define NANCHECK(frag) \
{ \
for (size_t _i = 0; _i < frag.size(); ++_i) { \
assert(std::isfinite(float(frag[_i]))); \
assert(!std::isnan(float(frag[_i]))); \
} \
}
// Print on the first thread of the first block
#if 1
#define PRINT_WARP_ID 0
#define PRINT_LANE_ID 0
#define PRINT_B0_T0(msg, ...) \
if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && \
threadIdx.x == PRINT_LANE_ID && threadIdx.y == PRINT_WARP_ID && \
threadIdx.z == 0) { \
printf(msg "\n", ##__VA_ARGS__); \
}
#define PRINT_T0(msg, ...) \
if (threadIdx.x == PRINT_LANE_ID && threadIdx.y == PRINT_WARP_ID && \
threadIdx.z == 0) { \
printf(msg "\n", ##__VA_ARGS__); \
}
#define PRINT_TX_LX(msg, ...) \
for (int bx = 0; bx < gridDim.x; ++bx) { \
for (int by = 0; by < gridDim.y; ++by) { \
for (int bz = 0; bz < gridDim.z; ++bz) { \
for (int tx = 0; tx < blockDim.x; ++tx) { \
for (int ty = 0; ty < blockDim.y; ++ty) { \
for (int tz = 0; tz < blockDim.z; ++tz) { \
__syncthreads(); \
if (blockIdx.x == bx && blockIdx.y == by && blockIdx.z == bz && \
threadIdx.x == tx && threadIdx.y == ty && \
threadIdx.z == tz) { \
printf( \
"[%d,%d,%d][%d,%d,%d]" msg "\n", \
bx, \
by, \
bz, \
tx, \
ty, \
tz, \
##__VA_ARGS__); \
} \
} \
} \
} \
} \
} \
}
#else
#define PRINT_B0_T0(msg, ...)
#define PRINT_T0(msg, ...)
#define PRINT_TX_LX(msg, ...)
#endif
struct __string_view {
char const* data;
std::size_t size;
};
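// __get_type_name<T>() relies on a GCC/Clang-style __PRETTY_FUNCTION__ that embeds the
// template argument as "T = SomeType]": it scans past the first '=', skips spaces, and
// returns a view of the characters up to the matching ']'. Pre-C++14 builds (and compilers
// without this format) fall back to the "unsupported" string.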
#if __cplusplus >= 201402L
template <class T>
constexpr __string_view __get_type_name() {
char const* p = __PRETTY_FUNCTION__;
while (*p++ != '=')
;
for (; *p == ' '; ++p)
;
char const* p2 = p;
int count = 1;
for (;; ++p2) {
switch (*p2) {
case '[':
++count;
break;
case ']':
--count;
if (!count)
return {p, std::size_t(p2 - p)};
}
}
return {};
}
#else
template <class T>
constexpr __string_view __get_type_name() {
return {"unsupported", 11};
}
#endif
// Print a given array
#define PRINT_ACCUM8_T0_L0_START(name, accum, start) \
PRINT_B0_T0( \
"%s[%d:%d] - {%f, %f, %f, %f, %f, %f, %f, %f}", \
name, \
int(start), \
int(start + 8), \
float(accum[start + 0]), \
float(accum[start + 1]), \
float(accum[start + 2]), \
float(accum[start + 3]), \
float(accum[start + 4]), \
float(accum[start + 5]), \
float(accum[start + 6]), \
float(accum[start + 7]));
#define PRINT_ACCUM8_T0_L0(name, accum) PRINT_ACCUM8_T0_L0_START(name, accum, 0)
#define PRINT_FRAG_T0_L0(name, frag) \
{ \
auto typeStr = __get_type_name<decltype(frag)>(); \
PRINT_B0_T0("printing %s (%s)", name, typeStr.data); \
for (size_t _start = 0; _start < frag.size(); _start += 8) { \
PRINT_ACCUM8_T0_L0_START(" ", frag, _start); \
} \
/*__syncthreads(); \
NANCHECK(frag); */ \
}
#define PRINT_ARRAY_T0_L0_INCR(name, array, length, incr) \
{ \
PRINT_B0_T0("printing %s (len=%d)", name, int(length)); \
for (int _start = 0; _start < length; _start += incr) { \
PRINT_ACCUM8_T0_L0_START(" ", array, _start); \
} \
}
#define PRINT_ARRAY_T0_L0(name, array, length) \
PRINT_ARRAY_T0_L0_INCR(name, array, length, 8)
// Print a 4x4 matrix
#define PRINT_TENSOR4x4_T0_L0_START(name, ref, start_x, start_y) \
PRINT_B0_T0( \
"%s[%d:%d, %d:%d]:\n %f, %f, %f, %f\n %f, %f, %f, %f\n %f, %f, %f, %f\n %f, %f, %f, %f", \
name, \
int(start_x), \
int(start_x + 4), \
int(start_y), \
int(start_y + 4), \
float(ref.at({start_x + 0, start_y + 0})), \
float(ref.at({start_x + 0, start_y + 1})), \
float(ref.at({start_x + 0, start_y + 2})), \
float(ref.at({start_x + 0, start_y + 3})), \
float(ref.at({start_x + 1, start_y + 0})), \
float(ref.at({start_x + 1, start_y + 1})), \
float(ref.at({start_x + 1, start_y + 2})), \
float(ref.at({start_x + 1, start_y + 3})), \
float(ref.at({start_x + 2, start_y + 0})), \
float(ref.at({start_x + 2, start_y + 1})), \
float(ref.at({start_x + 2, start_y + 2})), \
float(ref.at({start_x + 2, start_y + 3})), \
float(ref.at({start_x + 3, start_y + 0})), \
float(ref.at({start_x + 3, start_y + 1})), \
float(ref.at({start_x + 3, start_y + 2})), \
float(ref.at({start_x + 3, start_y + 3})));
#define PRINT_TENSOR4x4_T0_L0(name, ref) \
PRINT_TENSOR4x4_T0_L0_START(name, ref, 0, 0)
#define PRINT_PROBLEM_SIZE(name, ps) \
PRINT_B0_T0( \
"%s.problem_size: {.m=%d, .n=%d, .k=%d}", \
name, \
int(ps.m()), \
int(ps.n()), \
int(ps.k()))
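// Illustrative usage from device code (the fragment / problem-size names below are
// hypothetical placeholders, not symbols defined in this header):
//
//   PRINT_B0_T0("mainloop iteration %d", iter);
//   PRINT_FRAG_T0_L0("accum", accum_fragment);
//   PRINT_TENSOR4x4_T0_L0("smem_tile", smem_tile_ref);
//   PRINT_PROBLEM_SIZE("mm0", problem_size_0);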
template <typename LambdaIterator, typename LaneOffsetT, typename AccumT>
CUTLASS_DEVICE void print_warp_accum(
AccumT accum,
LaneOffsetT lane_offset,
int32_t num_rows,
int32_t num_cols) {
bool is_main = blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 &&
threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0;
for (int row = 0; row < num_rows; ++row) {
for (int col = 0; col < num_cols; ++col) {
if (col % 32 == 0) {
if (is_main) {
printf("\nmat[%3d, %3d:%3d]", row, col, col + 32);
}
__syncthreads();
}
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m, int accum_n, int idx) {
if (row == accum_m && col == accum_n &&
(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0)) {
printf(" %6.1f", float(accum[idx]));
}
},
[&](int accum_m) {});
__syncthreads();
}
if (is_main) {
printf("\n");
}
}
}
| examples/41_fused_multi_head_attention/debug_utils.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/debug_utils.h",
"repo_id": "examples",
"token_count": 7526
} | 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/functional.h"
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/matrix_shape.h"
/*
 TensorCores have different accumulator layouts.
 This file provides a class to easily map the i-th accumulator
 element to its corresponding matrix row/column.
*/
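//
// Illustrative usage pattern (a sketch; `Iter` is one of the AccumLambdaIterator
// specializations below, and `accum`, `lane_id`, `warp_id`, `tile_offset`, `row_max`
// are assumed to come from the calling kernel):
//
//   auto lane_offset = Iter::get_lane_offset(lane_id, warp_id, tile_offset);
//   Iter::iterateRows(
//       lane_offset,
//       [&](int accum_m) { /* once before each accumulator row owned by this lane */ },
//       [&](int accum_m, int accum_n, int idx) { row_max = fmaxf(row_max, float(accum[idx])); },
//       [&](int accum_m) { /* once after each row, e.g. combine lanes via Iter::reduceSameRow */ });
//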
template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSm80 {
static_assert(
cutlass::platform::
is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
"only RowMajor is supported");
using Policy = typename T::Policy;
using InstructionShape = typename T::InstructionShape;
using OpDelta = typename T::OpDelta;
using Shape = typename T::Shape;
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset(
int8_t lane_id,
int8_t warp_id,
typename T::TensorCoord const& tile_offset) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
return cutlass::MatrixCoord(
quad + tile_offset.row() * Shape::kRow,
lane_in_quad * kElementsPerAccess +
tile_offset.column() * Shape::kColumn);
}
template <typename FA, typename FB, typename FC>
CUTLASS_DEVICE static void iterateRows(
cutlass::MatrixCoord& lane_offset,
FA beginRow,
FB op,
FC endRow) {
// See cutlass/gemm/warp/mma_tensor_op_tile_iterator.h
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile + lane_offset.row();
beginRow(accum_m);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn +
col + lane_offset.column();
int idx = mma_accum_start + row * kElementsPerAccess + col;
op(accum_m, accum_n, idx);
}
}
endRow(accum_m);
}
}
}
template <typename DT, typename F>
CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) {
// In each warp, 4 threads will work on the same row
// - the ones with the same `quad`
auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1);
myValue = fn(myValue, otherV);
otherV = __shfl_xor_sync(0xffffffff, myValue, 2);
myValue = fn(myValue, otherV);
int lane_in_quad = (lane_id & 3);
return lane_in_quad == 0;
}
};
template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSm70 {
static_assert(
cutlass::platform::
is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
"only RowMajor is supported");
using Policy = typename T::Policy;
using InstructionShape = typename T::InstructionShape;
using OpDelta = typename T::OpDelta;
using Shape = typename T::Shape;
using Element = accum_t;
static int const kElementsPerPartial = 4;
using EleShapePerPatial = typename cutlass::platform::conditional<
cutlass::platform::is_same<Element, float>::value,
cutlass::MatrixShape<2, 2>,
cutlass::MatrixShape<1, 4>>::type;
static int const kElementsPerMma = 8;
static int const kAccumulatorPatials = 2;
using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>;
static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset(
int8_t lane_id,
int8_t warp_id,
typename T::TensorCoord const& tile_offset) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
int accum_m, accum_n;
if (cutlass::platform::is_same<Element, float>::value) {
// (quad[2],quad[0])+lane_in_quad[0]
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
// (quad[1])+lane_in_quad[1]
accum_n =
((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials +
(lane_in_quad & 2);
} else {
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 +
lane_in_quad; // (quad[2],quad[0])
accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials;
}
return cutlass::MatrixCoord(
accum_m + tile_offset.row() * Shape::kRow,
accum_n + tile_offset.column() * Shape::kColumn);
}
template <typename DT, typename F>
CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) {
static_assert(
cutlass::platform::is_same<Element, float>::value,
"update to support non-float accum");
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-884-f16
// T0 & T2 share same line within a quad
auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 1);
myValue = fn(myValue, otherV);
// quad 0 and quad 2 are on the same lines
otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 3);
myValue = fn(myValue, otherV);
return (lane_id & ((1 << 1) | (1 << 3))) == 0;
}
template <typename FA, typename FB, typename FC>
CUTLASS_DEVICE static void iterateRows(
cutlass::MatrixCoord& lane_offset,
FA beginRow,
FB op,
FC endRow) {
CUTLASS_PRAGMA_UNROLL
for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
int accum_m = tile_m * Policy::InterleavedTile::kRow +
mma_m * QuadShapePerPatialMma::kRow + m * 2 + lane_offset.row();
beginRow(accum_m);
CUTLASS_PRAGMA_UNROLL
for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn;
++tile_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn;
++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < kAccumulatorPatials; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
int mma_accum_start =
(((tile_n * Policy::TileIterations::kRow + tile_m) *
Policy::MmaIterations::kColumn +
mma_n) *
Policy::MmaIterations::kRow +
mma_m) *
kElementsPerMma;
int accum_n = tile_n * Policy::InterleavedTile::kColumn +
mma_n * QuadShapePerPatialMma::kColumn +
p * Policy::InterleavedTile::kColumn / 2 + n +
lane_offset.column();
int idx = mma_accum_start + p * kElementsPerPartial +
m * EleShapePerPatial::kColumn + n;
op(accum_m, accum_n, idx);
}
}
}
}
endRow(accum_m);
}
}
}
}
};
template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSimt {
using Policy = typename T::Policy;
using Iterations = typename T::Iterations;
using Element = typename T::Element;
using Delta = typename T::Delta;
using Shape = typename T::Shape;
static_assert(
cutlass::platform::
is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
"only RowMajor is supported");
template <typename DT, typename F>
CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) {
CUTLASS_PRAGMA_UNROLL
for (int bit = 1; bit < Policy::WarpShape::kColumn; bit *= 2) {
auto otherV = __shfl_xor_sync(0xffffffff, myValue, bit);
myValue = fn(myValue, otherV);
}
return (lane_id & (Policy::WarpShape::kColumn - 1)) == 0;
}
template <typename FA, typename FB, typename FC>
CUTLASS_DEVICE static void iterateRows(
cutlass::MatrixCoord& lane_offset,
FA beginRow,
FB op,
FC endRow) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) {
int accum_m = mma_m * Delta::kRow + m + lane_offset.row();
beginRow(accum_m);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
int accum_n =
mma_n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN +
lane_offset.column();
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) {
int idx = n +
Policy::LaneMmaShape::kN *
(mma_n +
Iterations::kColumn *
(m + mma_m * Policy::LaneMmaShape::kM));
op(accum_m, accum_n + n, idx);
}
}
endRow(accum_m);
}
}
}
static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset(
int8_t lane_id,
int8_t warp_id,
typename T::TensorCoord const& tile_offset) {
static_assert(
cutlass::platform::is_same<
typename Policy::LaneLayout,
cutlass::layout::RowMajorInterleaved<1>>::value,
"");
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
cutlass::MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
cutlass::MatrixCoord(Policy::LaneMmaShape::kM,
Policy::LaneMmaShape::kN);
return lane_offset +
tile_offset * cutlass::MatrixCoord(Shape::kRow, Shape::kColumn);
}
};
template <typename T, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator;
// Simt
template <typename S, typename P, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
cutlass::gemm::warp::MmaSimtTileIterator<
S,
cutlass::gemm::Operand::kC,
accum_t,
cutlass::layout::RowMajor,
P,
1,
1>,
accum_t,
kWarpSize> {
using WarpIterator = typename cutlass::gemm::warp::MmaSimtTileIterator<
S,
cutlass::gemm::Operand::kC,
accum_t,
cutlass::layout::RowMajor,
P,
1,
1>;
using Iterator = AccumLambdaIteratorSimt<WarpIterator, accum_t, kWarpSize>;
};
// TensorOp - Volta
template <typename S1, typename S2, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
cutlass::MatrixShape<1, 1>>,
accum_t,
kWarpSize> {
using WarpIterator =
typename cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
cutlass::MatrixShape<1, 1>>;
using Iterator = AccumLambdaIteratorSm70<WarpIterator, accum_t, kWarpSize>;
};
// TensorOp - Sm75+
template <
typename S1,
typename S2,
typename S3,
typename accum_t,
int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
S3>,
accum_t,
kWarpSize> {
using WarpIterator =
typename cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
S3>;
using Iterator = AccumLambdaIteratorSm80<WarpIterator, accum_t, kWarpSize>;
};
| examples/41_fused_multi_head_attention/gemm/mma_accum_lambda_iterator.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/gemm/mma_accum_lambda_iterator.h",
"repo_id": "examples",
"token_count": 6214
} | 9 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
class gen_test:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name
self.user_header_file = user_header_file
self.sample_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
def gen_cpp_sample(self):
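        # Emits a standalone sample.cu that builds the fused back-to-back GEMM inputs,
        # runs the fused path through one_api() and the unfused reference path through
        # <gen_class_name>_verify(), times both regions with the TI/TO helper macros
        # (100 iterations each), and finally compares the outputs with check_result().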
code = "/* Auto Generated code - Do not edit.*/\n"
code += "#include <stdio.h> \n"
code += "#include \"cutlass/gemm/device/gemm_batched.h\" \n"
code += "#include \"cutlass/cutlass.h\" \n"
code += "#include \"../cutlass_irrelevant.h\" \n"
code += "#include \"../cutlass_verify.h\" \n"
code += "#include \"leaky_bias.h\" \n"
code += "#include \"utils.h\" \n"
code += "int main(int args, char * argv[]) {\n"
code += " " + "int M = atoi(argv[1]);\n"
code += " " + "int K0 = " + str(self.fuse_gemm_info[0]['mnk'][0]) + ";\n"
code += " " + "if(args == 3);\n"
code += " " + " " + "K0 = atoi(argv[2]);\n"
code += " " + "int B = 1;\n"
code += " " + "if(args == 4);\n"
code += " " + " " + "B = atoi(argv[3]);\n"
code += " " + "srand(1234UL);\n"
code += " " + "int device_id = 0;\n"
code += " " + "cudaGetDevice(&device_id);\n"
code += " " + "cudaDeviceProp prop;\n"
code += " " + "cudaGetDeviceProperties(&prop, device_id);\n"
code += " " + "int sm = prop.major *10 + prop.minor;\n"
code += "using ElementCompute = cutlass::half_t;\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("ElementCompute alpha", i) + " = ElementCompute(1);\n"
addbias = helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i])
if addbias:
code += " " + helper.var_idx("ElementCompute beta", i) + " = ElementCompute(1);\n"
else:
code += " " + helper.var_idx("ElementCompute beta", i) + " = ElementCompute(0);\n"
code += " " + "size_t flops = 0;\n"
for i in range(self.b2b_num):
m = self.fuse_gemm_info[i]['mnk'][0]
n = self.fuse_gemm_info[i]['mnk'][1]
k = self.fuse_gemm_info[i]['mnk'][2]
bias_shape = helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])
this_k = "K0"
if (i > 0):
this_k = str(k)
code += " " + "flops += size_t(2) * size_t(M) * size_t(B) * " + "size_t(" + str(n) + ") * size_t(" + this_k + ");\n"
code += " " + helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(" + "M" + ", " + str(n) + ", " + this_k + ");\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_A", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_", i) + ".k());\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_B", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".n() * problem_size_", i) + ".k());\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_C", i) + "(B * " + str(bias_shape[0]) + " * " + str(bias_shape[1]) + ");\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_D_cutlass_ref", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_", i) + ".n());\n"
code += " " + helper.var_idx("Mat_A", i) + ".init();\n"
code += " " + helper.var_idx("Mat_B", i) + ".init();\n"
code += " " + helper.var_idx("Mat_C", i) + ".init();\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_D", self.b2b_num - 1) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_",self.b2b_num - 1) + ".n());\n"
params = []
params.append("M")
params.append("B")
params.append("Mat_A0.device_ptr")
for i in range(self.b2b_num):
params.append(helper.var_idx("Mat_B", i) + ".device_ptr")
params.append(helper.var_idx("Mat_C", i) + ".device_ptr")
if i != self.b2b_num-1:
params.append(helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr")
params.append(helper.var_idx("Mat_D", self.b2b_num - 1) + ".device_ptr")
code += " " + "Param arguments = {\n"
code += " " + " " + "M,\n"
code += " " + " " + "K0,\n"
code += " " + " " + "B,\n"
code += " " + " " + "reinterpret_cast<const void*>(Mat_A0.device_ptr),\n"
cnt = 1
for i in range(self.b2b_num):
bias_flag = helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i])
code += " " + " " + "reinterpret_cast<const void*>(" + helper.var_idx("Mat_B", i) + ".device_ptr" + "),\n"
cnt += 1
if bias_flag:
code += " " + " " + "reinterpret_cast<const void*>(" + helper.var_idx("Mat_C", i) + ".device_ptr" + "),\n"
cnt += 1
else:
code += " " + " " + "reinterpret_cast<const void*>(NULL),\n"
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_value = str(arg[2])
code += " " + " " + helper.type_2_cutlass_type(acc_tp) + "(" + arg_value + "),\n"
if i != self.b2b_num - 1:
code += " " + " " + "reinterpret_cast<void*>(" + helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr" + "),\n"
else:
code += " " + " " + "reinterpret_cast<void*>(" + helper.var_idx("Mat_D", i) + ".device_ptr" + ")};\n"
code += " " + "TI(FUSED_CUTLASS);\n"
code += " " + "for(int i = 0; i < 100; i++){\n"
code += " " + " " + "one_api(arguments, sm, NULL);\n"
code += " " + "}\n"
code += " " + "TO(FUSED_CUTLASS, \"FUSED_CUTLASS\", 100);\n"
code += "\n"
for i in range(self.b2b_num):
code_this = ""
N_str = str(self.fuse_gemm_info[i]['mnk'][1])
code_this += " " + helper.var_idx("typename Gemm", i) + helper.var_idx("::Arguments arguments_", i) + "{\n"
code_this += " " + " " + helper.var_idx("problem_size_", i) + ",\n"
ldmA = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmA = "K0"
ldmB = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmB = "K0"
ldmC = str(self.fuse_gemm_info[i]['mnk'][1])
ldmBias = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
            if self.fuse_gemm_info[i]['A_format'] == 'Col':
ldmA = "M"
            if self.fuse_gemm_info[i]['B_format'] == 'Row':
ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
            if self.fuse_gemm_info[i]['C_format'] == 'Col':
ldmC = "M"
if i == 0:
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("Mat_A", i) + ".device_ptr), " + ldmA + "}, " + "M * " + ldmA + ",\n"
else:
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("Mat_D_cutlass_ref", i - 1) + ".device_ptr), " + ldmA + "}, " + "M * " + ldmA + ",\n"
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("Mat_B", i) + ".device_ptr), " + ldmB + "}, " + N_str + " * " + ldmB + ",\n"
M_bias = str(helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])[0])
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("Mat_C", i) + ".device_ptr), " + ldmBias + "}, " + M_bias + " * " + N_str + ",\n"
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr), " + ldmC + "}, " + "M * " + ldmC + ",\n"
code_this += " " + " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_value = str(epilogue_arg[2])
code_this += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_value) + ")"
code_this += " " + " },\n"
code_this += " " + " " + "B};\n"
code += code_this
code += " " + "TI(UNFUSED_CUTLASS);\n"
code += " " + "for(int i = 0; i < 100; i++){\n"
code += " " + " " + self.gen_class_name + "_verify(\n"
for i in range(self.b2b_num):
code += " " + " " + " " + helper.var_idx("arguments_", i) + ",\n"
code += " " + " " + " " + "NULL);\n"
code += " " + "}\n"
code += " " + "TO(UNFUSED_CUTLASS, \"UNFUSED_CUTLASS\", 100);\n"
code += " " + helper.var_idx("Mat_D_cutlass_ref", self.b2b_num - 1) + ".d2h();\n"
code += " " + helper.var_idx("Mat_D", self.b2b_num - 1) + ".d2h();\n"
code += " " + helper.var_idx("check_result(Mat_D_cutlass_ref", self.b2b_num - 1) + helper.var_idx(".host_ptr, Mat_D", self.b2b_num - 1) \
+ helper.var_idx(".host_ptr, Mat_D", self.b2b_num - 1) + ".elements);\n"
code += "\n\n}\n"
with open(self.sample_dir + "sample.cu", "w+") as f:
f.write(code)
| examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_sample.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_sample.py",
"repo_id": "examples",
"token_count": 5946
} | 10 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Functor applying SiLU to the left operand and multiplying by the right operand, used by dual-GEMM epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/epilogue/thread/linear_combination_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies the SiLU activation to the left accumulator fragment and multiplies the
/// result elementwise with the right accumulator fragment.
///
/// D = SiLU(lhs) * rhs
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation.
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we use 64 or 32 sometimes when there are not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LeftSiLUAndMul {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
struct Params{};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LeftSiLUAndMul(Params const &/*params*/) {}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return true;
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
assert(false);
}
  /// Computes the fused elementwise operation: D = SiLU(lhs) * rhs
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &lhs,
FragmentAccumulator const &rhs) const {
    // Convert accumulator fragments to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_to_compute;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> compute_to_output;
ComputeFragment converted_lhs = accumulator_to_compute(lhs);
ComputeFragment converted_rhs = accumulator_to_compute(rhs);
cutlass::epilogue::thread::SiLu<ComputeFragment> silu;
cutlass::multiplies<ComputeFragment> mul;
auto silu_lhs = silu(converted_lhs);
return compute_to_output(mul(silu_lhs, converted_rhs));
}
CUTLASS_HOST_DEVICE
ElementOutput operator()(
ElementAccumulator const& lhs,
ElementAccumulator const& rhs
) const {
ElementCompute convert_lhs(lhs);
ElementCompute convert_rhs(rhs);
cutlass::epilogue::thread::SiLu<ElementCompute> silu;
cutlass::multiplies<ElementCompute> mul;
auto silu_lhs = silu(convert_lhs);
return ElementOutput(mul(silu_lhs, convert_rhs));
}
};
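// Usage sketch (illustrative only; the fragment values are hypothetical):
//
//   using Op = LeftSiLUAndMul<cutlass::half_t, 8, float, float>;
//   Op op{typename Op::Params{}};
//   Op::FragmentOutput d = op(frag_silu_side, frag_mul_side);  // d = SiLU(lhs) * rhs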
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/45_dual_gemm/thread/left_silu_and_mul.h/0 | {
"file_path": "examples/45_dual_gemm/thread/left_silu_and_mul.h",
"repo_id": "examples",
"token_count": 1804
} | 11 |
This example shows how to do mixed types GEMMs in CUTLASS.
## High level overview
This example shows how to perform GEMMs on Hopper when A and B have different types. This implementation always passes the type with fewer bits through the register file and upcasts to the type with the higher bit count.
When relying on `KernelScheduleAuto`, the main loop supporting different A and B types will be selected whenever the bit count of A is not equal to the bit count of B. Users can manually select the mixed type main loop and explicitly choose the scheduling policy by specifying one of the following schedules to the `CollectiveBuilder`: `KernelTmaWarpSpecializedMixedInput`, `KernelTmaWarpSpecializedPingpongMixedInput` or `KernelTmaWarpSpecializedCooperativeMixedInput`.
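The snippet below is a minimal sketch of how one of these schedules could be passed to the 3.x `CollectiveBuilder`. The element types, layouts, alignments, tile/cluster shapes, and exact namespaces are placeholders for illustration and may differ from the types used in this example's own source:

```c++
// Sketch only: explicitly request the mixed-input cooperative mainloop.
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
    cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
    ElementA, LayoutA, AlignmentA,   // e.g. the narrow (fewer-bit) operand, K-major
    ElementB, LayoutB, AlignmentB,   // e.g. the wide operand
    ElementAccumulator,
    TileShape, ClusterShape,
    cutlass::gemm::collective::StageCountAuto,
    cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput
  >::CollectiveOp;
```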
This first version only supports mixed type GEMMs using TMA.
## Performance
While the example offers a harness for straightforward benchmarking, this initial implementation isn't optimized for performance in the majority of scenarios. We expect this implementation to be performant for `{fp16, bf16} x {int8, int4}` and `{fp8} x {int4}` for problems that are compute bound. Additionally, we expect good performance for `fp16, bf16` or `fp32` scales and zero-points. For best performance, it is ideal to have the scales and zero-points be the same type.
We are currently optimizing the following cases:
1. Memory bound cases for all types
## Limitations
* The type that needs to be converted must go through the register file. This means that the collective will swap and transpose whenever the type with fewer bits is the B operand. The user must be aware of when these swaps happen. Note that TMA epilogues currently do not support *implicit* swap + transpose, so non-TMA epilogues must be used in this case. We plan to relax this limitation in a future release.
* The layout of the narrow type must be K-major. This means the following:
* Narrow type is the A operand: Must be Row-Major
* Narrow type is the B operand: Must be Column-Major
* For 8-bit x 4-bit or 2-bit, both inputs must be K-major.
* TMA requires an alignment of 128 bits. As a result, for a type with `B` bits, `B x TILE_K` must be a multiple of 128 bits (for example, a 4-bit type needs `TILE_K` to be a multiple of 32).
* The type of the scale and zero-point type must be two bytes or more.
* The group size must be equal to gemm-k size (indicating a broadcast), or it must be a multiple of the threadblock-k size.
## Upcoming features
* Optimizations for memory bound cases.
* Optimizations for scale and zero-point loading when the group size is not equal to the threadblock-k size.
| examples/55_hopper_mixed_dtype_gemm/README.md/0 | {
"file_path": "examples/55_hopper_mixed_dtype_gemm/README.md",
"repo_id": "examples",
"token_count": 665
} | 12 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cuda_runtime.h"
#include <iostream>
/**
* Panic wrapper for unwinding CUTLASS errors
*/
#define CUTLASS_CHECK(status) \
{ \
cutlass::Status error = status; \
if (error != cutlass::Status::kSuccess) { \
std::cerr << "Got cutlass error: " << cutlassGetStatusString(error) << " at: " << __LINE__ \
<< std::endl; \
exit(EXIT_FAILURE); \
} \
}
/**
* Panic wrapper for unwinding CUDA runtime errors
*/
#define CUDA_CHECK(status) \
{ \
cudaError_t error = status; \
if (error != cudaSuccess) { \
std::cerr << "Got bad cuda status: " << cudaGetErrorString(error) \
<< " at line: " << __LINE__ << std::endl; \
exit(EXIT_FAILURE); \
} \
}
/**
* GPU timer for recording the elapsed time across kernel(s) launched in GPU stream
*/
struct GpuTimer
{
cudaStream_t _stream_id;
cudaEvent_t _start;
cudaEvent_t _stop;
/// Constructor
GpuTimer() : _stream_id(0)
{
CUDA_CHECK(cudaEventCreate(&_start));
CUDA_CHECK(cudaEventCreate(&_stop));
}
/// Destructor
~GpuTimer()
{
CUDA_CHECK(cudaEventDestroy(_start));
CUDA_CHECK(cudaEventDestroy(_stop));
}
/// Start the timer for a given stream (defaults to the default stream)
void start(cudaStream_t stream_id = 0)
{
_stream_id = stream_id;
CUDA_CHECK(cudaEventRecord(_start, _stream_id));
}
/// Stop the timer
void stop()
{
CUDA_CHECK(cudaEventRecord(_stop, _stream_id));
}
/// Return the elapsed time (in milliseconds)
float elapsed_millis()
{
float elapsed = 0.0;
CUDA_CHECK(cudaEventSynchronize(_stop));
CUDA_CHECK(cudaEventElapsedTime(&elapsed, _start, _stop));
return elapsed;
}
};
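// Usage sketch (illustrative only; my_kernel and its launch configuration are
// hypothetical):
//
//   GpuTimer timer;
//   timer.start();                        // record start event on the default stream
//   my_kernel<<<grid, block>>>(/*...*/);  // work being measured
//   timer.stop();                         // record stop event
//   float ms = timer.elapsed_millis();    // synchronizes on the stop event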
| examples/common/helper.h/0 | {
"file_path": "examples/common/helper.h",
"repo_id": "examples",
"token_count": 2059
} | 13 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/atom/copy_atom.hpp>
#include <cute/algorithm/copy.hpp>
#include <cute/tensor.hpp>
#include <cute/tensor_predicate.hpp>
namespace cute
{
// cooperative_copy<NumThreads, MaxVecBits>(thr_idx, src, dst)
// Use NumThreads to copy src to dst with element vectorization up to MaxVecBits.
// @pre 0 <= @a tid < NumThreads
// @pre Tensors @a src and @a dst are aligned up to MaxVecBits.
//
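// Usage sketch (illustrative only; the tile tensors and thread count are hypothetical):
//
//   // All 128 threads of a CTA cooperatively copy a gmem tile into smem,
//   // vectorizing accesses up to 128 bits where alignment and layouts permit.
//   cooperative_copy<128, 128>(threadIdx.x, gmem_tile, smem_tile);
//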
template <uint32_t NumThreads, uint32_t MaxVecBits,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
cooperative_copy(uint32_t const& tid,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
// Assumes the shapes are static, can generalize
CUTE_STATIC_ASSERT_V(size(src) == size(dst));
// Assumes the types are the same, can generalize
static_assert(sizeof_bits_v<typename SrcEngine::value_type> == sizeof_bits_v<typename DstEngine::value_type>);
static_assert(MaxVecBits == sizeof_bits_v<typename SrcEngine::value_type> ||
MaxVecBits == 8 || MaxVecBits == 16 || MaxVecBits == 32 || MaxVecBits == 64 || MaxVecBits == 128,
"Expected MaxVecBits to be value size or 8 or 16 or 32 or 64 or 128 for alignment and performance.");
// Check that the tensors are likely shared across threads: either gmem or smem
static_assert((is_gmem<SrcEngine>::value || is_smem<SrcEngine>::value),
"cooperative_copy expects shared gmem or smem source tensor.");
static_assert((is_gmem<DstEngine>::value || is_smem<DstEngine>::value),
"cooperative_copy expects shared gmem or smem destination tensor.");
// Precondition on tid in DEBUG
assert(tid < NumThreads);
// Fallback - slow path, naive copy, vectorization disabled
if constexpr(size(SrcLayout{}) % NumThreads != 0) {
int index = static_cast<int>(tid);
CUTE_UNROLL
for(int i = 0; i < ceil_div(size(SrcLayout{}), NumThreads); i++) {
if(index < size(SrcLayout{})) {
dst[index] = src[index];
}
index += NumThreads;
}
} else {
// Fast path with vectorization
// Precondition on pointer alignment in DEBUG
assert(is_byte_aligned<max(MaxVecBits/8, 1u)>(raw_pointer_cast(src.data())));
assert(is_byte_aligned<max(MaxVecBits/8, 1u)>(raw_pointer_cast(dst.data())));
constexpr int elem_bits = sizeof_bits_v<typename SrcEngine::value_type>;
//
// Determine val+thr vectorization based on src/dst size and number of threads
// NOTE: This heuristic promotes parallelization over vectorization
//
// The number of elements that can be vectorized in values
constexpr int common_elem = decltype(max_common_vector(src, dst))::value;
constexpr int common_bits = common_elem * elem_bits;
constexpr int total_elem = decltype(size(src))::value;
constexpr int total_bits = total_elem * elem_bits;
static_assert(total_bits % NumThreads == 0);
constexpr int total_bits_per_thr = total_bits / NumThreads;
    // If there are too many threads to allow a full element copy, truncate the thread count and use elem_bits
constexpr int max_vec_bits_by_thr = cute::max(elem_bits, total_bits_per_thr);
// Cap the vectorization to the common bits, the max_vec_bits_by_thr, and the MaxVecBits
constexpr int vec_bits = cute::min(common_bits, max_vec_bits_by_thr, static_cast<int>(MaxVecBits));
// Convert back to number of elements, safe_div
static_assert((vec_bits % elem_bits) == 0);
constexpr int vec_elem = vec_bits / elem_bits;
// Use only part of threads if there's not enough work for all threads
constexpr int vec_thrs = (total_elem % (vec_elem * NumThreads) == 0)
? NumThreads
: (total_elem / vec_elem);
static_assert(vec_thrs <= NumThreads);
// The common layout of the two tensors that can be vectorized over threads
// vidx -> coord
auto common_layout = max_common_layout(get_nonswizzle_portion(src.layout()),
get_nonswizzle_portion(dst.layout()));
// Scale up the common_layout to cover the entire tensors
// vidx -> coord
auto full_perm = tile_to_shape(make_layout(common_layout), size(src));
// Create the Tiler
// ((vid,tid),iter)
auto layout_vt = logical_divide(full_perm, Layout<Shape<Int<vec_elem>, Int<vec_thrs>>>{});
// Apply and slice
Tensor src_v = src.compose(layout_vt)(make_coord(_,tid),_);
Tensor dst_v = dst.compose(layout_vt)(make_coord(_,tid),_);
// Should account for vec_bits < 8 and/or vec_elem <= 1
// And also account for subbyte types, which could cause race conditions
// Want to ENFORCE sufficient vectorization in those cases
static_assert((vec_bits >= 8), "No support for subbyte copying");
using VecType = uint_bit_t<vec_bits>;
#if 0
if (thread0()) {
print(" "); print("cooperative_copy -- vec\n");
print(" "); print("NumThreads: "); print(NumThreads); print("\n");
print(" "); print("MaxVecBits: "); print(MaxVecBits); print("\n");
print(" "); print("src: "); print(src); print("\n");
print(" "); print("dst: "); print(dst); print("\n");
print(" "); print("common_layout: "); print(common_layout); print("\n");
print(" "); print("full_perm: "); print(full_perm); print("\n");
print(" "); print("Used vector: "); print(vec_elem); print("\n");
print(" "); print("Used threads: "); print(vec_thrs); print("\n");
print(" "); print("layout_vt: "); print(layout_vt); print("\n");
print(" "); print("src.compose(layout_vt): "); print(src.compose(layout_vt)); print("\n");
print(" "); print("dst.compose(layout_vt): "); print(dst.compose(layout_vt)); print("\n");
print(" "); print("src_v: "); print(src_v); print("\n");
print(" "); print("dst_v: "); print(dst_v); print("\n");
print(" "); print("recast<VecType const>(src_v): "); print(recast<VecType const>(src_v)); print("\n");
print(" "); print("recast<VecType const>(dst_v): "); print(recast<VecType const>(dst_v)); print("\n");
}
#ifdef __CUDA_ARCH__
__syncthreads();
#endif
#endif
// If we're using all threads (static) or the tid is in in-range (dynamic)
if (vec_thrs >= NumThreads or tid < vec_thrs) {
return copy_if(TrivialPredTensor{}, recast<VecType const>(src_v), recast<VecType>(dst_v));
}
}
}
template <uint32_t NumThreads,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
cooperative_copy(uint32_t const& tid,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
constexpr uint32_t MaxVecBits = sizeof_bits_v<typename SrcEngine::value_type>;
return cooperative_copy<NumThreads, MaxVecBits>(tid, src, dst);
}
// Accept mutable temporaries
template <uint32_t NumThreads,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
cooperative_copy(uint32_t const& tid,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return cooperative_copy<NumThreads>(tid, src, dst);
}
// Accept mutable temporaries
template <uint32_t NumThreads,
uint32_t MaxVecBits,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
cooperative_copy(uint32_t const& tid,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return cooperative_copy<NumThreads, MaxVecBits>(tid, src, dst);
}
} // end namespace cute
| include/cute/algorithm/cooperative_copy.hpp/0 | {
"file_path": "include/cute/algorithm/cooperative_copy.hpp",
"repo_id": "include",
"token_count": 3756
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <cuda.h>
#include <cinttypes>
#endif
#include <cute/config.hpp>
#include <cute/arch/copy.hpp>
#include <cute/arch/copy_sm90.hpp>
#include <cute/container/alignment.hpp>
#include <cute/container/bit_field.hpp>
#include <cute/container/array.hpp>
#include <cute/numeric/numeric_types.hpp>
namespace cute
{
//////////////////////////////////////////////////////////////////////////////////////////////////////
/// Barriers are 64 bits of user-managed information used in broadly two types of synchronization patterns
/// 1) arrive/wait on threads (usage: cp.async and warp-specialized kernels)
/// 2) transaction-based (usage: TMA transaction where a CTA issues one transaction)
//////////////////////////////////////////////////////////////////////////////////////////////////////
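// Usage sketch (illustrative only): a minimal transaction-style handshake within one
// CTA; the producer/consumer split, byte count, and phase management are placeholders.
//
//   __shared__ uint64_t smem_bar;
//   if (threadIdx.x == 0) { initialize_barrier(smem_bar, /*thread_count=*/1); }
//   __syncthreads();
//   int phase = 0;
//   if (is_producer) {
//     set_barrier_transaction_bytes(smem_bar, tma_bytes);  // arrive + expect tma_bytes
//     // ... issue a TMA copy that commits tma_bytes to smem_bar ...
//   } else {
//     wait_barrier(smem_bar, phase);                       // block until the phase flips
//   }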
// Initialize barrier present in shared memory
CUTE_HOST_DEVICE
void
initialize_barrier(uint64_t& smem_barrier,                 // 64 bits user-managed barrier in smem
int thread_count = 1) // Thread count expected to arrive/wait on this barrier
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier);
asm volatile ("mbarrier.init.shared::cta.b64 [%0], %1;\n"
:: "r"(smem_int_ptr),
"r"(thread_count));
#endif
}
// Set the number of bytes transferred per transaction and perform an arrive operation as well
CUTE_HOST_DEVICE
void
set_barrier_transaction_bytes(uint64_t& smem_barrier,      // 64 bits user-managed barrier in smem
                              uint32_t bytes)               // Number of bytes transferred per TMA transaction
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier);
asm volatile ("mbarrier.arrive.expect_tx.shared::cta.b64 _, [%0], %1;\n"
:: "r"(smem_int_ptr),
"r"(bytes));
#endif
}
// Barrier wait
CUTE_HOST_DEVICE
void
wait_barrier(uint64_t& smem_barrier,                       // 64 bits user-managed barrier in smem
             int phase_bit)                                 // Current phase bit the barrier is waiting on to flip
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier);
asm volatile(
"{\n"
".reg .pred P1;\n"
"LAB_WAIT:\n"
"mbarrier.try_wait.parity.shared::cta.b64 P1, [%0], %1;\n"
"@P1 bra.uni DONE;\n"
"bra.uni LAB_WAIT;\n"
"DONE:\n"
"}\n"
:: "r"(smem_int_ptr),
"r"(phase_bit));
#endif
}
// Barrier arrive
CUTE_HOST_DEVICE
void
arrive_barrier(uint64_t& smem_barrier)                     // 64 bits user-managed barrier in smem
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier);
asm volatile(
"{\n"
".reg .b64 state; \n"
"mbarrier.arrive.shared::cta.b64 state, [%0];\n"
"}\n"
:: "r"(smem_int_ptr));
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// TMA Descriptor and utilities
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace TMA {
enum class SmemSwizzleBits : uint8_t {
DISABLE = 0,
B32 = 1,
B64 = 2,
B128 = 3,
};
#if (__CUDACC_VER_MAJOR__ >= 12)
#if !defined(__CUDACC_RTC__)
/// @return The TMA descriptor datatype enum corresponding to T.
template <class T>
inline CUtensorMapDataType
to_CUtensorMapDataType() {
if constexpr (is_same_v<T, int8_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else
if constexpr (is_same_v<T, uint8_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else
if constexpr (is_same_v<T, float_e4m3_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else
if constexpr (is_same_v<T, float_e5m2_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else
if constexpr (is_same_v<T, uint16_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT16; } else
if constexpr (is_same_v<T, uint32_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT32; } else
if constexpr (is_same_v<T, uint64_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT64; } else
if constexpr (is_same_v<T, int32_t>) { return CU_TENSOR_MAP_DATA_TYPE_INT32; } else
if constexpr (is_same_v<T, int64_t>) { return CU_TENSOR_MAP_DATA_TYPE_INT64; } else
if constexpr (is_same_v<T, half_t>) { return CU_TENSOR_MAP_DATA_TYPE_FLOAT16; } else
if constexpr (is_same_v<T, float>) { return CU_TENSOR_MAP_DATA_TYPE_FLOAT32; } else
if constexpr (is_same_v<T, double>) { return CU_TENSOR_MAP_DATA_TYPE_FLOAT64; } else
if constexpr (is_same_v<T, bfloat16_t>) { return CU_TENSOR_MAP_DATA_TYPE_BFLOAT16; } else
if constexpr (is_same_v<T, tfloat32_t>) { return CU_TENSOR_MAP_DATA_TYPE_TFLOAT32; } else
{ static_assert(sizeof(T) < 0, "Unknown TMA Format!"); }
}
inline CUtensorMapSwizzle
to_CUtensorMapSwizzle(SmemSwizzleBits const& t) {
switch (t) {
default: assert(false && "Unknown SmemSwizzleBits!");
case SmemSwizzleBits::DISABLE: return CU_TENSOR_MAP_SWIZZLE_NONE;
case SmemSwizzleBits::B32: return CU_TENSOR_MAP_SWIZZLE_32B;
case SmemSwizzleBits::B64: return CU_TENSOR_MAP_SWIZZLE_64B;
case SmemSwizzleBits::B128: return CU_TENSOR_MAP_SWIZZLE_128B;
}
}
#endif // !defined(__CUDACC_RTC__)
#endif // (__CUDACC_VER_MAJOR__ >= 12)
} // end namespace TMA
#if (__CUDACC_VER_MAJOR__ >= 12) && !defined(__CUDACC_RTC__)
using TmaDescriptor = CUtensorMap;
using Im2ColTmaDescriptor = CUtensorMap;
#else
using TmaDescriptor = struct alignas(64) { char bytes[128]; };
using Im2ColTmaDescriptor = struct alignas(64) { char bytes[128]; };
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Initiates a TensorMap Prefetch
////////////////////////////////////////////////////////////////////////////////////////////////////
CUTE_HOST_DEVICE
void
prefetch_tma_descriptor(TmaDescriptor const* desc_ptr)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
// Prefetch TMA Descriptor using generic addressing (i.e. no specific state space: const or param)
asm volatile (
"prefetch.tensormap [%0];"
:
: "l"(gmem_int_desc)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use TMA Descriptor Prefetch without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Perform a TensorMap modification (by each field)
////////////////////////////////////////////////////////////////////////////////////////////////////
// Replace tensor pointer directly in GMEM
CUTE_HOST_DEVICE
void
tma_descriptor_replace_addr_in_global_mem(TmaDescriptor const* desc_ptr,
void const* const new_tensor_ptr)
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint64_t const new_desc_addr = reinterpret_cast<uint64_t>(new_tensor_ptr);
asm volatile (
"tensormap.replace.tile.global_address.global.b1024.b64 [%0], %1;"
:: "l"(gmem_int_desc), "l"(new_desc_addr));
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
// Replace tensor pointer by bringing the tensormap from GMEM into the shared memory
CUTE_HOST_DEVICE
void
tma_descriptor_replace_addr_in_shared_mem(TmaDescriptor& smem_desc,
void const* const new_tensor_ptr)
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
uint32_t smem_int_desc = cast_smem_ptr_to_uint(&smem_desc);
uint64_t const new_desc_addr = reinterpret_cast<uint64_t>(new_tensor_ptr);
uint64_t const smem_int64_desc = 0;
asm volatile (
"cvt.u64.u32 %0, %1;"
:: "l"(smem_int64_desc), "r"(smem_int_desc));
asm volatile (
"tensormap.replace.tile.global_address.shared::cta.b1024.b64 [%0], %1;"
:: "l"(smem_int64_desc), "l"(new_desc_addr));
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
// Replace tensor dims and strides for GEMMs by bringing the tensormap from GMEM into the shared memory
CUTE_HOST_DEVICE
void
tma_descriptor_replace_dims_strides_in_shared_mem(TmaDescriptor & smem_desc,
cute::array<uint32_t, 3> const& prob_shape,
cute::array<uint64_t, 3> const& prob_stride)
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
uint32_t smem_int_desc = cast_smem_ptr_to_uint(&smem_desc);
uint64_t const smem_int64_desc = 0;
asm volatile (
"cvt.u64.u32 %0, %1;"
:: "l"(smem_int64_desc), "r"(smem_int_desc));
asm volatile (
"tensormap.replace.tile.global_dim.shared::cta.b1024.b32 [%0], 0, %1;"
:: "l"(smem_int64_desc), "r"(prob_shape[0]));
asm volatile (
"tensormap.replace.tile.global_dim.shared::cta.b1024.b32 [%0], 1, %1;"
:: "l"(smem_int64_desc), "r"(prob_shape[1]));
asm volatile (
"tensormap.replace.tile.global_dim.shared::cta.b1024.b32 [%0], 2, %1;"
:: "l"(smem_int64_desc), "r"(prob_shape[2]));
  // Strides must be a multiple of 16. Also, the stride for the innermost dimension is implicitly 1
asm volatile (
"tensormap.replace.tile.global_stride.shared::cta.b1024.b64 [%0], 0, %1;"
:: "l"(smem_int64_desc), "l"(prob_stride[1] >> 4));
asm volatile (
"tensormap.replace.tile.global_stride.shared::cta.b1024.b64 [%0], 1, %1;"
:: "l"(smem_int64_desc), "l"(prob_stride[2] >> 4));
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Perform a fused copy and fence operation (needed when modifying tensormap in shared memory)
////////////////////////////////////////////////////////////////////////////////////////////////////
CUTE_HOST_DEVICE
void
tma_descriptor_cp_fence_release(TmaDescriptor const* gmem_desc_ptr, TmaDescriptor& smem_desc)
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(gmem_desc_ptr);
uint32_t smem_int_desc = cast_smem_ptr_to_uint(&smem_desc);
asm volatile (
"tensormap.cp_fenceproxy.global.shared::cta.tensormap::generic.release.gpu.sync.aligned [%0], [%1], 128;"
:: "l"(gmem_int_desc), "r"(smem_int_desc));
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Perform a release fence operation (needed when modifying tensormap directly in GMEM)
////////////////////////////////////////////////////////////////////////////////////////////////////
CUTE_HOST_DEVICE
void
tma_descriptor_fence_release()
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
asm volatile ("fence.proxy.tensormap::generic.release.gpu;");
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Perform a acquire fence operation
////////////////////////////////////////////////////////////////////////////////////////////////////
CUTE_HOST_DEVICE
void
tma_descriptor_fence_acquire(TmaDescriptor const* desc_ptr)
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
asm volatile (
"fence.proxy.tensormap::generic.acquire.gpu [%0], 128;"
:
: "l"(gmem_int_desc)
: "memory");
asm volatile (
"cvta.global.u64 %0, %0;"
:
: "l"(gmem_int_desc), "l"(gmem_int_desc)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
///////////////////////////////////////////////////////////////////////////////
} // end namespace cute
| include/cute/arch/copy_sm90_desc.hpp/0 | {
"file_path": "include/cute/arch/copy_sm90_desc.hpp",
"repo_id": "include",
"token_count": 5497
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Statically sized array of elements that accommodates subbyte trivial types
in a packed storage.
*/
#pragma once
#include <cute/config.hpp>
#include <cute/numeric/numeric_types.hpp>
#include <cute/numeric/integral_constant.hpp>
namespace cute
{
//
// Underlying subbyte storage type
//
template <class T>
using subbyte_storage_type_t = conditional_t<(cute::sizeof_bits_v<T> <= 8), uint8_t,
conditional_t<(cute::sizeof_bits_v<T> <= 16), uint16_t,
conditional_t<(cute::sizeof_bits_v<T> <= 32), uint32_t,
conditional_t<(cute::sizeof_bits_v<T> <= 64), uint64_t,
conditional_t<(cute::sizeof_bits_v<T> <= 128), uint128_t,
T>>>>>;
template <class T> struct subbyte_iterator;
template <class, class> struct swizzle_ptr;
//
// subbyte_reference
// Proxy object for sub-byte element references
//
template <class T>
struct subbyte_reference
{
// Iterator Element type (const or non-const)
using element_type = T;
// Iterator Value type without type qualifier.
using value_type = remove_cv_t<T>;
// Storage type (const or non-const)
using storage_type = conditional_t<(is_const_v<T>), subbyte_storage_type_t<T> const, subbyte_storage_type_t<T>>;
static_assert(sizeof_bits_v<storage_type> % 8 == 0, "Storage type is not supported");
static_assert(sizeof_bits_v<element_type> <= sizeof_bits_v<storage_type>,
"Size of Element must not be greater than Storage.");
private:
// Bitmask for covering one item
static constexpr storage_type BitMask = storage_type(storage_type(-1) >> (sizeof_bits_v<storage_type> - sizeof_bits_v<element_type>));
// Flag for fast branching on straddled elements
static constexpr bool is_storage_unaligned = ((sizeof_bits_v<storage_type> % sizeof_bits_v<element_type>) != 0);
friend struct subbyte_iterator<T>;
// Pointer to storage element
storage_type* ptr_ = nullptr;
// Bit index of value_type starting position within storage_type element.
// RI: 0 <= idx_ < sizeof_bit<storage_type>
uint8_t idx_ = 0;
// Ctor
template <class PointerType>
CUTE_HOST_DEVICE constexpr
subbyte_reference(PointerType* ptr, uint8_t idx = 0) : ptr_(reinterpret_cast<storage_type*>(ptr)), idx_(idx) {}
public:
// Copy Ctor
CUTE_HOST_DEVICE constexpr
subbyte_reference(subbyte_reference const& other) {
*this = element_type(other);
}
// Copy Assignment
CUTE_HOST_DEVICE constexpr
subbyte_reference& operator=(subbyte_reference const& other) {
return *this = element_type(other);
}
// Assignment
template <class T_ = element_type>
CUTE_HOST_DEVICE constexpr
enable_if_t<!is_const_v<T_>, subbyte_reference&> operator=(element_type x)
{
static_assert(is_same_v<T_, element_type>, "Do not specify template arguments!");
storage_type item = (reinterpret_cast<storage_type const&>(x) & BitMask);
// Update the current storage element
storage_type bit_mask_0 = storage_type(BitMask << idx_);
ptr_[0] = storage_type((ptr_[0] & ~bit_mask_0) | (item << idx_));
// If value_type is unaligned with storage_type (static) and this is a straddled value (dynamic)
if (is_storage_unaligned && idx_ + sizeof_bits_v<value_type> > sizeof_bits_v<storage_type>) {
uint8_t straddle_bits = uint8_t(sizeof_bits_v<storage_type> - idx_);
storage_type bit_mask_1 = storage_type(BitMask >> straddle_bits);
// Update the next storage element
ptr_[1] = storage_type((ptr_[1] & ~bit_mask_1) | (item >> straddle_bits));
}
return *this;
}
// Comparison of referenced values
CUTE_HOST_DEVICE constexpr friend
bool operator==(subbyte_reference const& x, subbyte_reference const& y) { return x.get() == y.get(); }
CUTE_HOST_DEVICE constexpr friend
bool operator!=(subbyte_reference const& x, subbyte_reference const& y) { return x.get() != y.get(); }
CUTE_HOST_DEVICE constexpr friend
bool operator< (subbyte_reference const& x, subbyte_reference const& y) { return x.get() < y.get(); }
CUTE_HOST_DEVICE constexpr friend
bool operator> (subbyte_reference const& x, subbyte_reference const& y) { return x.get() > y.get(); }
CUTE_HOST_DEVICE constexpr friend
bool operator<=(subbyte_reference const& x, subbyte_reference const& y) { return x.get() <= y.get(); }
CUTE_HOST_DEVICE constexpr friend
bool operator>=(subbyte_reference const& x, subbyte_reference const& y) { return x.get() >= y.get(); }
// Value
CUTE_HOST_DEVICE
element_type get() const
{
if constexpr (is_same_v<bool, value_type>) { // Extract to bool -- potentially faster impl
return bool((*ptr_) & (BitMask << idx_));
} else { // Extract to element_type
// Extract from the current storage element
auto item = storage_type((ptr_[0] >> idx_) & BitMask);
// If value_type is unaligned with storage_type (static) and this is a straddled value (dynamic)
if (is_storage_unaligned && idx_ + sizeof_bits_v<value_type> > sizeof_bits_v<storage_type>) {
uint8_t straddle_bits = uint8_t(sizeof_bits_v<storage_type> - idx_);
storage_type bit_mask_1 = storage_type(BitMask >> straddle_bits);
// Extract from the next storage element
item |= storage_type((ptr_[1] & bit_mask_1) << straddle_bits);
}
return reinterpret_cast<element_type&>(item);
}
}
// Extract to type element_type
CUTE_HOST_DEVICE constexpr
operator element_type() const {
return get();
}
// Address
subbyte_iterator<T> operator&() const {
return {ptr_, idx_};
}
};
//
// subbyte_iterator
// Random-access iterator over subbyte references
//
template <class T>
struct subbyte_iterator
{
// Iterator Element type (const or non-const)
using element_type = T;
// Iterator Value type without type qualifier.
using value_type = remove_cv_t<T>;
// Storage type (const or non-const)
using storage_type = conditional_t<(is_const_v<T>), subbyte_storage_type_t<T> const, subbyte_storage_type_t<T>>;
// Reference proxy type
using reference = subbyte_reference<element_type>;
static_assert(sizeof_bits_v<storage_type> % 8 == 0, "Storage type is not supported");
static_assert(sizeof_bits_v<element_type> <= sizeof_bits_v<storage_type>,
"Size of Element must not be greater than Storage.");
private:
template <class, class> friend struct swizzle_ptr;
// Pointer to storage element
storage_type* ptr_ = nullptr;
// Bit index of value_type starting position within storage_type element.
// RI: 0 <= idx_ < sizeof_bits_v<storage_type>
uint8_t idx_ = 0;
public:
// Ctor
subbyte_iterator() = default;
// Ctor
template <class PointerType>
CUTE_HOST_DEVICE constexpr
subbyte_iterator(PointerType* ptr, uint8_t idx = 0) : ptr_(reinterpret_cast<storage_type*>(ptr)), idx_(idx) { }
CUTE_HOST_DEVICE constexpr
reference operator*() const {
return reference(ptr_, idx_);
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator& operator+=(uint64_t k) {
k = sizeof_bits_v<value_type> * k + idx_;
ptr_ += k / sizeof_bits_v<storage_type>;
idx_ = k % sizeof_bits_v<storage_type>;
return *this;
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator operator+(uint64_t k) const {
return subbyte_iterator(ptr_, idx_) += k;
}
CUTE_HOST_DEVICE constexpr
reference operator[](uint64_t k) const {
return *(*this + k);
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator& operator++() {
idx_ += sizeof_bits_v<value_type>;
if (idx_ >= sizeof_bits_v<storage_type>) {
++ptr_;
idx_ -= sizeof_bits_v<storage_type>;
}
return *this;
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator operator++(int) {
subbyte_iterator ret(*this);
++(*this);
return ret;
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator& operator--() {
if (idx_ >= sizeof_bits_v<value_type>) {
idx_ -= sizeof_bits_v<value_type>;
} else {
--ptr_;
idx_ += sizeof_bits_v<storage_type> - sizeof_bits_v<value_type>;
}
return *this;
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator operator--(int) {
subbyte_iterator ret(*this);
--(*this);
return ret;
}
CUTE_HOST_DEVICE constexpr friend
bool operator==(subbyte_iterator const& x, subbyte_iterator const& y) {
return x.ptr_ == y.ptr_ && x.idx_ == y.idx_;
}
CUTE_HOST_DEVICE constexpr friend
bool operator< (subbyte_iterator const& x, subbyte_iterator const& y) {
return x.ptr_ < y.ptr_ || (x.ptr_ == y.ptr_ && x.idx_ < y.idx_);
}
CUTE_HOST_DEVICE constexpr friend
bool operator!=(subbyte_iterator const& x, subbyte_iterator const& y) { return !(x == y); }
CUTE_HOST_DEVICE constexpr friend
bool operator<=(subbyte_iterator const& x, subbyte_iterator const& y) { return !(y < x); }
CUTE_HOST_DEVICE constexpr friend
bool operator> (subbyte_iterator const& x, subbyte_iterator const& y) { return (y < x); }
CUTE_HOST_DEVICE constexpr friend
bool operator>=(subbyte_iterator const& x, subbyte_iterator const& y) { return !(x < y); }
// Conversion to raw pointer with loss of subbyte index
CUTE_HOST_DEVICE constexpr friend
T* raw_pointer_cast(subbyte_iterator const& x) {
assert(x.idx_ == 0);
return reinterpret_cast<T*>(x.ptr_);
}
// Conversion to NewT_ with possible loss of subbyte index
template <class NewT_>
CUTE_HOST_DEVICE constexpr friend
auto recast_ptr(subbyte_iterator const& x) {
using NewT = conditional_t<(is_const_v<T>), NewT_ const, NewT_>;
if constexpr (cute::is_subbyte_v<NewT>) { // Making subbyte_iter, preserve the subbyte idx
return subbyte_iterator<NewT>(x.ptr_, x.idx_);
} else { // Not subbyte, assume/assert subbyte idx 0
return reinterpret_cast<NewT*>(raw_pointer_cast(x));
}
CUTE_GCC_UNREACHABLE;
}
CUTE_HOST_DEVICE friend void print(subbyte_iterator x) {
printf("subptr[%db](%p.%u)", int(sizeof_bits_v<T>), x.ptr_, x.idx_);
}
};
//
// array_subbyte
// Statically sized array for non-byte-aligned data types
//
template <class T, size_t N>
struct array_subbyte
{
using element_type = T;
using value_type = remove_cv_t<T>;
using pointer = element_type*;
using const_pointer = element_type const*;
using size_type = size_t;
using difference_type = ptrdiff_t;
//
// References
//
using reference = subbyte_reference<element_type>;
using const_reference = subbyte_reference<element_type const>;
//
// Iterators
//
using iterator = subbyte_iterator<element_type>;
using const_iterator = subbyte_iterator<element_type const>;
// Storage type (const or non-const)
using storage_type = conditional_t<(is_const_v<T>), subbyte_storage_type_t<T> const, subbyte_storage_type_t<T>>;
static_assert(sizeof_bits_v<storage_type> % 8 == 0, "Storage type is not supported");
private:
// Number of storage elements needed to hold N values (ceil division)
static constexpr size_type StorageElements = (N * sizeof_bits_v<value_type> + sizeof_bits_v<storage_type> - 1) / sizeof_bits_v<storage_type>;
// Internal storage
storage_type storage[StorageElements];
public:
constexpr
array_subbyte() = default;
CUTE_HOST_DEVICE constexpr
array_subbyte(array_subbyte const& x) {
CUTE_UNROLL
for (size_type i = 0; i < StorageElements; ++i) {
storage[i] = x.storage[i];
}
}
CUTE_HOST_DEVICE constexpr
size_type size() const {
return N;
}
CUTE_HOST_DEVICE constexpr
size_type max_size() const {
return N;
}
CUTE_HOST_DEVICE constexpr
bool empty() const {
return !N;
}
// Efficient clear method
CUTE_HOST_DEVICE constexpr
void clear() {
CUTE_UNROLL
for (size_type i = 0; i < StorageElements; ++i) {
storage[i] = storage_type(0);
}
}
CUTE_HOST_DEVICE constexpr
void fill(T const& value) {
CUTE_UNROLL
for (size_type i = 0; i < N; ++i) {
at(i) = value;
}
}
CUTE_HOST_DEVICE constexpr
reference at(size_type pos) {
return iterator(storage)[pos];
}
CUTE_HOST_DEVICE constexpr
const_reference at(size_type pos) const {
return const_iterator(storage)[pos];
}
CUTE_HOST_DEVICE constexpr
reference operator[](size_type pos) {
return at(pos);
}
CUTE_HOST_DEVICE constexpr
const_reference operator[](size_type pos) const {
return at(pos);
}
CUTE_HOST_DEVICE constexpr
reference front() {
return at(0);
}
CUTE_HOST_DEVICE constexpr
const_reference front() const {
return at(0);
}
CUTE_HOST_DEVICE constexpr
reference back() {
return at(N-1);
}
CUTE_HOST_DEVICE constexpr
const_reference back() const {
return at(N-1);
}
CUTE_HOST_DEVICE constexpr
pointer data() {
return reinterpret_cast<pointer>(storage);
}
CUTE_HOST_DEVICE constexpr
const_pointer data() const {
return reinterpret_cast<const_pointer>(storage);
}
CUTE_HOST_DEVICE constexpr
storage_type* raw_data() {
return storage;
}
CUTE_HOST_DEVICE constexpr
storage_type const* raw_data() const {
return storage;
}
CUTE_HOST_DEVICE constexpr
iterator begin() {
return iterator(storage);
}
CUTE_HOST_DEVICE constexpr
const_iterator begin() const {
return const_iterator(storage);
}
CUTE_HOST_DEVICE constexpr
const_iterator cbegin() const {
return begin();
}
CUTE_HOST_DEVICE constexpr
iterator end() {
return iterator(storage) + N;
}
CUTE_HOST_DEVICE constexpr
const_iterator end() const {
return const_iterator(storage) + N;
}
CUTE_HOST_DEVICE constexpr
const_iterator cend() const {
return end();
}
//
// Comparison operators
//
};
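//
// Usage sketch (illustrative only, not part of the library). It assumes cute::int4_t
// is the 4-bit integer type exposed by CuTe's numeric headers; any other sub-byte
// type supported by subbyte_storage_type_t works the same way.
//
// @code
// CUTE_HOST_DEVICE void array_subbyte_example() {
//   cute::array_subbyte<cute::int4_t, 16> a;   // 16 x 4-bit values packed into 8 bytes
//   a.fill(cute::int4_t(3));                   // writes go through subbyte_reference proxies
//   a[5] = cute::int4_t(-2);                   // single-element update, straddle-safe
//   cute::int4_t x = a[5];                     // reads convert the proxy back to int4_t
//   for (auto it = a.begin(); it != a.end(); ++it) {
//     cute::int4_t v = *it;                    // subbyte_iterator dereferences to a proxy
//     (void) v;
//   }
//   (void) x;
// }
// @endcode
//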
//
// Operators
//
template <class T, size_t N>
CUTE_HOST_DEVICE constexpr
void clear(array_subbyte<T,N>& a)
{
a.clear();
}
template <class T, size_t N>
CUTE_HOST_DEVICE constexpr
void fill(array_subbyte<T,N>& a, T const& value)
{
a.fill(value);
}
} // namespace cute
//
// Specialize tuple-related functionality for cute::array_subbyte
//
#if defined(__CUDACC_RTC__)
#include <cuda/std/tuple>
#else
#include <tuple>
#endif
namespace cute
{
template <size_t I, class T, size_t N>
CUTE_HOST_DEVICE constexpr
T& get(array_subbyte<T,N>& a)
{
static_assert(I < N, "Index out of range");
return a[I];
}
template <size_t I, class T, size_t N>
CUTE_HOST_DEVICE constexpr
T const& get(array_subbyte<T,N> const& a)
{
static_assert(I < N, "Index out of range");
return a[I];
}
template <size_t I, class T, size_t N>
CUTE_HOST_DEVICE constexpr
T&& get(array_subbyte<T,N>&& a)
{
static_assert(I < N, "Index out of range");
return cute::move(a[I]);
}
} // end namespace cute
namespace CUTE_STL_NAMESPACE
{
template <class T>
struct is_reference<cute::subbyte_reference<T>>
: CUTE_STL_NAMESPACE::true_type
{};
template <class T, size_t N>
struct tuple_size<cute::array_subbyte<T,N>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, cute::array_subbyte<T,N>>
{
using type = T;
};
template <class T, size_t N>
struct tuple_size<const cute::array_subbyte<T,N>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, const cute::array_subbyte<T,N>>
{
using type = T;
};
} // end namespace CUTE_STL_NAMESPACE
#ifdef CUTE_STL_NAMESPACE_IS_CUDA_STD
namespace std
{
#if defined(__CUDACC_RTC__)
template <class... _Tp>
struct tuple_size;
template <size_t _Ip, class... _Tp>
struct tuple_element;
#endif
template <class T, size_t N>
struct tuple_size<cute::array_subbyte<T,N>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, cute::array_subbyte<T,N>>
{
using type = T;
};
template <class T, size_t N>
struct tuple_size<const cute::array_subbyte<T,N>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, const cute::array_subbyte<T,N>>
{
using type = T;
};
} // end namespace std
#endif // CUTE_STL_NAMESPACE_IS_CUDA_STD
| include/cute/container/array_subbyte.hpp/0 | {
"file_path": "include/cute/container/array_subbyte.hpp",
"repo_id": "include",
"token_count": 6841
} | 16 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Barrier Operations on SM90+
*/
#pragma once
#include <cutlass/arch/memory_sm75.h>
#include <cute/arch/cluster_sm90.hpp>
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900 && (__CUDACC_VER_MAJOR__ >= 12)
#define CUDA_BARRIER_ENABLED 1
#else
#define CUDA_BARRIER_ENABLED 0
#endif
namespace cutlass {
/// @brief
namespace arch {
////////////////////////////////////////////////////////////////////////////////////////////////////
// Enumerates the reserved named barriers to avoid potential conflicts
// This enum class specifies the NamedBarriers reserved by CUTLASS.
enum class ReservedNamedBarriers {
EpilogueBarrier = 0,
TransposeBarrier = 1,
TransformBarrier = 2,
StreamkBarrier0 = 3,
StreamkBarrier1 = 4
, FirstUserBarrier = StreamkBarrier1 + 1
};
class NamedBarrier {
// Data Members:
// Range = [1 , NUM_THREADS_PER_CTA]
//  Range % warp-size (i.e. 32) == 0
uint32_t const num_threads_;
// Range : [0, 15]
//  Note: id_ holds the final (effective) barrier ID, i.e. any ReservedNamedBarrierCount offset has already been applied by the constructor
uint32_t const id_;
public:
// Constructor for CUTLASS developers:
// effective barrier ID starts from 0
CUTLASS_DEVICE
NamedBarrier(uint32_t num_threads, ReservedNamedBarriers reserved_named_barriers)
: num_threads_(num_threads), id_(static_cast<uint32_t>(reserved_named_barriers)) {}
// Constructor for CUTLASS users:
// effective barrier ID starts from ReservedNamedBarrierCount
CUTLASS_DEVICE
NamedBarrier(uint32_t num_threads, uint32_t id = 0)
: num_threads_(num_threads), id_(id + ReservedNamedBarrierCount) {
CUTLASS_ASSERT(id + ReservedNamedBarrierCount < HardwareMaxNumNamedBarriers && "Effective barrier_id must be less than 16.");
}
CUTLASS_DEVICE
void arrive_and_wait() const {
// Note: The value of id_ is already the final barrier id (set correctly in the constructor).
NamedBarrier::arrive_and_wait_internal(num_threads_, id_);
}
CUTLASS_DEVICE
void arrive() const {
// Note: The value of id_ is already the final barrier id (set correctly in the constructor).
NamedBarrier::arrive_internal(num_threads_, id_);
}
CUTLASS_DEVICE
void sync() const {
NamedBarrier::arrive_and_wait();
}
// Static variants
// Calling interface for CUTLASS users:
// effective barrier ID starts from ReservedNamedBarrierCount
CUTLASS_DEVICE
static void arrive_and_wait(uint32_t num_threads, uint32_t barrier_id) {
arrive_and_wait_internal(num_threads, barrier_id + ReservedNamedBarrierCount);
}
// Calling interface for CUTLASS developers:
// effective barrier ID starts from 0
CUTLASS_DEVICE
static void arrive_and_wait(uint32_t num_threads, ReservedNamedBarriers reserved_named_barriers) {
arrive_and_wait_internal(num_threads, static_cast<int>(reserved_named_barriers));
}
// Calling interface for CUTLASS users:
// effective barrier ID starts from ReservedNamedBarrierCount
CUTLASS_DEVICE
static void arrive(uint32_t num_threads, uint32_t barrier_id) {
arrive_internal(num_threads, barrier_id + ReservedNamedBarrierCount);
}
// Calling interface for CUTLASS developers:
// effective barrier ID starts from 0
CUTLASS_DEVICE
static void arrive(uint32_t num_threads, ReservedNamedBarriers reserved_named_barriers) {
arrive_internal(num_threads, static_cast<int>(reserved_named_barriers));
}
// Calling interface for CUTLASS users:
// effective barrier ID starts from ReservedNamedBarrierCount
CUTLASS_DEVICE
static void sync(uint32_t num_threads, uint32_t barrier_id) {
sync_internal(num_threads, barrier_id + ReservedNamedBarrierCount);
}
// Calling interface for CUTLASS developers:
// effective barrier ID starts from 0
CUTLASS_DEVICE
static void sync(uint32_t num_threads, ReservedNamedBarriers reserved_named_barriers) {
sync_internal(num_threads, static_cast<int>(reserved_named_barriers));
}
private:
CUTLASS_DEVICE
static void arrive_and_wait_internal(uint32_t num_threads, uint32_t barrier_id) {
#if CUDA_BARRIER_ENABLED
asm volatile("bar.sync %0, %1;" : : "r"(barrier_id), "r"(num_threads));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
CUTLASS_DEVICE
static void arrive_internal(uint32_t num_threads, uint32_t barrier_id) {
#if CUDA_BARRIER_ENABLED
asm volatile("bar.arrive %0, %1;" : : "r"(barrier_id), "r"(num_threads));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
CUTLASS_DEVICE
static void sync_internal(uint32_t num_threads, uint32_t barrier_id) {
NamedBarrier::arrive_and_wait_internal(num_threads, barrier_id);
}
public:
// Currently we reserve ReservedNamedBarrierCount NamedBarriers for CUTLASS' own use cases,
// while leaving the remaining barriers for general users.
static const uint32_t ReservedNamedBarrierCount = static_cast<uint32_t>(ReservedNamedBarriers::FirstUserBarrier);
static const uint32_t HardwareMaxNumNamedBarriers = 16;
};
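//
// Usage sketch (illustrative only): a user-facing NamedBarrier synchronizing the first
// 256 threads of a thread block. The id passed here starts at 0 and is offset internally
// by ReservedNamedBarrierCount, so it never collides with CUTLASS' reserved barriers.
//
// @code
// __global__ void named_barrier_example_kernel() {
//   constexpr uint32_t kGroupThreads = 256;        // must be a multiple of the warp size
//   if (threadIdx.x < kGroupThreads) {
//     cutlass::arch::NamedBarrier bar(kGroupThreads, /*id=*/0);
//     // ... produce data in shared memory ...
//     bar.arrive_and_wait();                       // all 256 threads rendezvous here
//     // ... consume data ...
//   }
// }
// @endcode
//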
////////////////////////////////////////////////////////////////////////////////////////////////////
// Hopper introduces a new cluster-wide barrier which handles cluster-wide arrive-wait behaviour.
// This is an extension of the Ampere arrive-wait barriers.
// Note : Ampere arrive-wait Barriers have a larger max-arrive count (2^30) than Hopper arrive-wait Barriers (2^20).
struct ClusterBarrier {
using ValueType = uint64_t;
protected:
// Can never be initialized - can only be aliased to smem
ValueType barrier_;
public:
CUTLASS_DEVICE
ClusterBarrier() = delete;
CUTLASS_DEVICE
void init(uint32_t arrive_count) const {
ClusterBarrier::init(&this->barrier_, arrive_count);
}
CUTLASS_DEVICE
uint32_t test_wait(uint32_t phase, uint32_t pred=true) const {
return ClusterBarrier::test_wait(&this->barrier_, phase, pred);
}
CUTLASS_DEVICE
uint32_t try_wait(uint32_t phase) const {
return ClusterBarrier::try_wait(&this->barrier_, phase);
}
CUTLASS_DEVICE
void wait(uint32_t phase) const {
ClusterBarrier::wait(&this->barrier_, phase);
}
// Barrier arrive on local smem
CUTLASS_DEVICE
void arrive() const {
ClusterBarrier::arrive(&this->barrier_);
}
// Remote SMEM arrive with a predicate (usually used to select the thread that performs the arrive)
CUTLASS_DEVICE
void arrive(uint32_t cta_id, uint32_t pred = true ) const {
ClusterBarrier::arrive(&this->barrier_, cta_id, pred);
}
//
// Static Versions
//
CUTLASS_DEVICE
static void init(ValueType const* smem_ptr, uint32_t arrive_count) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
asm volatile(
"{\n\t"
"mbarrier.init.shared::cta.b64 [%1], %0; \n"
"}"
:
: "r"(arrive_count), "r"(smem_addr));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
// Static version of wait - in case we don't want to burn a register
CUTLASS_DEVICE
static void wait(ValueType const* smem_ptr, uint32_t phase) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
// Arbitrarily large timer value after which try-wait expires and re-tries.
uint32_t ticks = 0x989680;
asm volatile(
"{\n\t"
".reg .pred P1; \n\t"
"LAB_WAIT: \n\t"
"mbarrier.try_wait.parity.shared::cta.b64 P1, [%0], %1, %2; \n\t"
"@P1 bra.uni DONE; \n\t"
"bra.uni LAB_WAIT; \n\t"
"DONE: \n\t"
"}"
:
: "r"(smem_addr), "r"(phase), "r"(ticks));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
CUTLASS_DEVICE
static uint32_t test_wait(ValueType const* smem_ptr, uint32_t phase, uint32_t pred) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
uint32_t waitComplete;
asm volatile(
"{\n\t"
".reg .pred P1; \n\t"
".reg .pred P2; \n\t"
"setp.eq.u32 P2, %3, 1;\n\t"
"@P2 mbarrier.test_wait.parity.shared::cta.b64 P1, [%1], %2; \n\t"
"selp.b32 %0, 1, 0, P1; \n\t"
"}"
: "=r"(waitComplete)
: "r"(smem_addr), "r"(phase), "r"(pred));
return waitComplete;
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
return 0;
}
CUTLASS_DEVICE
static uint32_t try_wait(ValueType const* smem_ptr, uint32_t phase) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
uint32_t waitComplete;
asm volatile(
"{\n\t"
".reg .pred P1; \n\t"
"mbarrier.try_wait.parity.shared::cta.b64 P1, [%1], %2; \n\t"
"selp.b32 %0, 1, 0, P1; \n\t"
"}"
: "=r"(waitComplete)
: "r"(smem_addr), "r"(phase));
return waitComplete;
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
return 0;
}
// Static Predicated version of the above - in case we know the address.
CUTLASS_DEVICE
static void arrive(ValueType const* smem_ptr, uint32_t cta_id, uint32_t pred) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
asm volatile(
"{\n\t"
".reg .pred p;\n\t"
".reg .b32 remAddr32;\n\t"
"setp.eq.u32 p, %2, 1;\n\t"
"@p mapa.shared::cluster.u32 remAddr32, %0, %1;\n\t"
"@p mbarrier.arrive.shared::cluster.b64 _, [remAddr32];\n\t"
"}"
:
: "r"(smem_addr), "r"(cta_id), "r"(pred));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
// Barrier arrive on local smem
CUTLASS_DEVICE
static void arrive(ValueType const* smem_ptr) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
asm volatile(
"{\n\t"
"mbarrier.arrive.shared::cta.b64 _, [%0];\n\t"
"}"
:
: "r"(smem_addr));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
CUTLASS_DEVICE
static void invalidate(ValueType const* smem_ptr) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
asm volatile(
"{\n\t"
"mbarrier.inval.shared::cta.b64 [%0]; \n\t"
"}"
:
: "r"(smem_addr));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
};
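//
// Usage sketch (illustrative only; the phase bookkeeping is simplified). The barrier can
// only alias shared memory, so it is placed in dynamic shared memory here; the kernel is
// assumed to be launched with at least sizeof(uint64_t) bytes of dynamic shared memory.
//
// @code
// __global__ void cluster_barrier_example_kernel() {
//   extern __shared__ char smem_raw[];
//   auto* bar = reinterpret_cast<cutlass::arch::ClusterBarrier*>(smem_raw);
//
//   if (threadIdx.x == 0) {
//     bar->init(blockDim.x);                   // expect one arrive per thread
//   }
//   cutlass::arch::fence_barrier_init();       // make the init visible ...
//   __syncthreads();                           // ... to the whole block before use
//
//   uint32_t phase = 0;
//   // ... produce data ...
//   bar->arrive();                             // every thread arrives once
//   bar->wait(phase);                          // blocks until this phase completes
//   phase ^= 1;                                // the next round waits on the other parity
//   // ... consume data ...
// }
// @endcode
//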
////////////////////////////////////////////////////////////////////////////////////////////////////
// SM90 also introduces a new type of cluster barrier which supports synchronization
// based not only on arrive count, but also on transaction count (in bytes)
struct ClusterTransactionBarrier : public ClusterBarrier {
CUTLASS_DEVICE
ClusterTransactionBarrier() = delete;
// Performs an arrive operation + expected transaction bytes increment
CUTLASS_DEVICE
void arrive_and_expect_tx(uint32_t transaction_bytes) const {
ClusterTransactionBarrier::arrive_and_expect_tx(&this->barrier_, transaction_bytes);
}
// Performs an arrive operation + expected transaction bytes increment
CUTLASS_DEVICE
void arrive_and_expect_tx(uint32_t transaction_bytes, uint32_t cta_id, uint32_t pred = 1u) const {
ClusterTransactionBarrier::arrive_and_expect_tx(&this->barrier_, transaction_bytes , cta_id, pred);
}
// Performs an expected transaction bytes increment without doing an arrive operation
CUTLASS_DEVICE
void expect_transaction(uint32_t transaction_bytes) const {
ClusterTransactionBarrier::expect_transaction(&this->barrier_, transaction_bytes);
}
// Performs an expected transaction bytes decrement without doing an arrive operation
CUTLASS_DEVICE
void complete_transaction(uint32_t transaction_bytes, uint32_t pred = 1) const {
uint32_t cta_rank = cute::block_rank_in_cluster();
ClusterTransactionBarrier::complete_transaction(&this->barrier_, cta_rank, transaction_bytes, pred);
}
// Performs an expected transaction bytes decrement without doing an arrive operation
CUTLASS_DEVICE
void complete_transaction(uint32_t dst_cta_id, uint32_t transaction_bytes, uint32_t pred) const {
ClusterTransactionBarrier::complete_transaction(&this->barrier_, dst_cta_id, transaction_bytes, pred);
}
//
// Static Versions
//
// Performs an arrive operation + expected transaction bytes increment
CUTLASS_DEVICE
static void arrive_and_expect_tx(ValueType const* smem_ptr, uint32_t transaction_bytes) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
asm volatile(
"{\n\t"
"mbarrier.arrive.expect_tx.shared::cta.b64 _, [%1], %0; \n\t"
"}"
:
: "r"(transaction_bytes), "r"(smem_addr));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
// Performs an arrive operation + expected transaction bytes increment for a remote cta_id in a Cluster
CUTLASS_DEVICE
static void arrive_and_expect_tx(
ValueType const* smem_ptr, uint32_t transaction_bytes, uint32_t cta_id, uint32_t pred) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
asm volatile(
"{\n\t"
".reg .pred p;\n\t"
".reg .b32 remAddr32;\n\t"
"setp.eq.u32 p, %2, 1;\n\t"
"@p mapa.shared::cluster.u32 remAddr32, %0, %1;\n\t"
"@p mbarrier.arrive.expect_tx.shared::cluster.b64 _, [remAddr32], %3;\n\t"
"}"
:
: "r"(smem_addr), "r"(cta_id), "r"(pred), "r"(transaction_bytes));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
// Performs an expected transaction bytes increment without doing an arrive operation
CUTLASS_DEVICE
static void expect_transaction(ValueType const* smem_ptr, uint32_t transaction_bytes) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
asm volatile(
"{\n\t"
"mbarrier.expect_tx.shared::cta.b64 [%1], %0; \n\t"
"}"
:
: "r"(transaction_bytes), "r"(smem_addr));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
// Performs an expected transaction bytes decrement without doing an arrive operation
CUTLASS_DEVICE
static void complete_transaction(
ValueType const* smem_ptr, uint32_t dst_cta_id, uint32_t transaction_bytes, uint32_t pred = 1) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
smem_addr = cute::set_block_rank(smem_addr, dst_cta_id);
asm volatile(
"{\n\t"
".reg .pred p;\n\t"
"setp.eq.u32 p, %2, 1;\n\t"
"@p mbarrier.complete_tx.shared::cluster.relaxed.cluster.b64 [%1], %0;"
"}"
:
: "r"(transaction_bytes), "r"(smem_addr), "r"(pred));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
//
// DEPRECATED APIs
//
[[deprecated("Use arrive_and_expect_tx instead")]] CUTLASS_DEVICE
void arrive_and_reset_bytes(uint32_t transaction_bytes) const {
arrive_and_expect_tx(transaction_bytes);
}
[[deprecated("Use arrive_and_expect_tx instead")]] CUTLASS_DEVICE
void arrive_and_reset_bytes(uint32_t transaction_bytes, uint32_t cta_id) const {
arrive_and_expect_tx(transaction_bytes, cta_id);
}
[[deprecated("Use expect_transaction instead")]] CUTLASS_DEVICE
void reset_bytes(uint32_t transaction_bytes) const {
expect_transaction(transaction_bytes);
}
[[deprecated("Use complete_transaction instead")]] CUTLASS_DEVICE
void commit(uint32_t transaction_bytes, uint32_t pred = 1) const {
complete_transaction(transaction_bytes, pred);
}
[[deprecated("Use complete_transaction instead")]] CUTLASS_DEVICE
void commit(uint32_t dst_cta_id, uint32_t transaction_bytes, uint32_t pred) const {
complete_transaction(dst_cta_id, transaction_bytes, pred);
}
[[deprecated("Use arrive_and_expect_tx instead")]] CUTLASS_DEVICE
static void arrive_and_reset_bytes(ValueType const* smem_ptr, uint32_t transaction_bytes) {
arrive_and_expect_tx(smem_ptr, transaction_bytes);
}
[[deprecated("Use arrive_and_expect_tx instead")]] CUTLASS_DEVICE
static void arrive_and_reset_bytes(ValueType const* smem_ptr, uint32_t transaction_bytes, uint32_t cta_id, uint32_t pred) {
arrive_and_expect_tx(smem_ptr, transaction_bytes, cta_id, pred);
}
[[deprecated("Use expect_transaction instead")]] CUTLASS_DEVICE
static void reset_bytes(ValueType const* smem_ptr, uint32_t transaction_bytes) {
expect_transaction(smem_ptr, transaction_bytes);
}
[[deprecated("Use complete_transaction instead")]] CUTLASS_DEVICE
static void commit(ValueType const* smem_ptr, uint32_t dst_cta_id, uint32_t transaction_bytes, uint32_t pred = 1) {
complete_transaction(smem_ptr, dst_cta_id, transaction_bytes, pred);
}
};
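//
// Usage sketch (illustrative only). In real kernels the transaction-byte completion is
// normally performed by the TMA hardware when the copy lands in shared memory; calling
// complete_transaction() by hand below only illustrates how the counters pair up. The
// launch is assumed to provide enough dynamic shared memory for the barrier and data.
//
// @code
// __global__ void cluster_txn_barrier_example_kernel() {
//   constexpr uint32_t kBytes = 4096;
//   extern __shared__ char smem_raw[];
//   auto* bar = reinterpret_cast<cutlass::arch::ClusterTransactionBarrier*>(smem_raw);
//
//   if (threadIdx.x == 0) {
//     bar->init(1);                             // a single arriving thread
//   }
//   cutlass::arch::fence_barrier_init();
//   __syncthreads();
//
//   if (threadIdx.x == 0) {
//     bar->arrive_and_expect_tx(kBytes);        // arrive and expect kBytes in flight
//     // ... issue the asynchronous copy of kBytes into shared memory ...
//     bar->complete_transaction(kBytes);        // stand-in for the hardware completion
//   }
//   uint32_t phase = 0;
//   bar->wait(phase);                           // all threads block until data arrives
//   // ... consume the kBytes now resident in shared memory ...
// }
// @endcode
//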
// Helps with visibility of barrier init operations across warps / cta / cluster
// Available as a separate function so as to batch inits across barriers and fence once
// Note: it must be composed with an appropriate sync instruction of the right scope
// to ensure visibility, e.g. __syncthreads() or a cluster_arrive() + cluster_wait()
CUTLASS_DEVICE
void fence_barrier_init() {
#if CUDA_BARRIER_ENABLED
asm volatile(
"{\n\t"
"fence.mbarrier_init.release.cluster; \n"
"}"
::);
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
// Issue a shared memory fence for async operations
CUTLASS_DEVICE
void fence_view_async_shared() {
#if CUDA_BARRIER_ENABLED
asm volatile (
"{\n\t"
"fence.proxy.async.shared::cta; \n"
"}"
::);
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
// Arrive on completion of in-flight cp.async operations issued by the calling thread
CUTLASS_DEVICE
void cpasync_barrier_arrive(uint64_t const* smem_ptr) {
#if CUDA_BARRIER_ENABLED
uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr);
asm volatile(
"{\n\t"
"cp.async.mbarrier.arrive.shared::cta.b64 [%0];\n\t"
"}"
:
: "r"(smem_addr));
#elif defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
}
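//
// Usage sketch (illustrative only): pairing cp.async copies with an mbarrier so consumers
// can wait on the barrier instead of cp.async.wait_group. The barrier below is assumed to
// be a ClusterBarrier (or raw uint64_t mbarrier) already initialized in shared memory.
//
// @code
// // ... issue cp.async copies into shared memory ...
// cutlass::arch::cpasync_barrier_arrive(reinterpret_cast<uint64_t const*>(&barrier));
// // The arrive fires once the calling thread's in-flight cp.async operations complete;
// // consumers then wait() on the barrier as usual.
// @endcode
//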
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
} // end namespace arch
} // end namespace cutlass
| include/cutlass/arch/barrier.h/0 | {
"file_path": "include/cutlass/arch/barrier.h",
"repo_id": "include",
"token_count": 7747
} | 17 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief PTX for TMA Tensor Memory Access operators on memory added for SM90
*/
#pragma once
#include <cuda_runtime_api.h>
#include "cutlass/cutlass.h"
#include "cutlass/trace.h"
#if defined(__CUDACC_RTC__)
#include <cuda/std/type_traits>
#else
#include <type_traits>
#include <cstdio>
#endif
#if ((__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)))
# define CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED
#endif
namespace cutlass {
#ifndef NDEBUG
#define Return_Status(cudaError_t_status) \
if (cudaError_t_status != cudaSuccess) { \
fprintf(stderr, \
"[ ERROR: CUDA Runtime ] %s:%d: %s\n", \
__FILE__, \
__LINE__, \
cudaGetErrorString(cudaError_t_status)); \
return Status::kInvalid; \
} else { \
return Status::kSuccess; \
}
#else
#define Return_Status(cudaError_t_status) \
if (cudaError_t_status != cudaSuccess) { \
return Status::kInvalid; \
} else { \
return Status::kSuccess; \
}
#endif
struct ClusterLauncher {
constexpr static int MaxClusterSize = 32;
// Check for hardware compatibility
static inline CUTLASS_HOST
Status check_cluster_dims(dim3 grid, dim3 cluster) {
if (((cluster.x * cluster.y * cluster.z) <= MaxClusterSize) &&
(grid.x % cluster.x == 0) && (grid.y % cluster.y == 0) && (grid.z % cluster.z == 0)) {
return Status::kSuccess;
}
else {
CUTLASS_TRACE_HOST("ClusterLauncher: Invalid cluster configuration -- aborting launch.");
return Status::kInvalid;
}
}
static inline CUTLASS_HOST
Status
#if defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED)
init(void const* kernel_function)
#else
init(void const* /* kernel_function */)
#endif
{
#if defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED)
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
if (kernel_function == nullptr) {
CUTLASS_TRACE_HOST("kernel_function is null");
return Status::kInvalid;
}
CUTLASS_TRACE_HOST("Checking previous error state before calling cudaFuncSetAttribute");
cudaError_t prevStatus = cudaGetLastError();
if (prevStatus != cudaSuccess) {
fprintf(stderr,
"[ ERROR: CUDA Runtime ] %s:%d: %s\n",
__FILE__,
__LINE__,
cudaGetErrorString(prevStatus));
return Status::kInvalid;
}
CUTLASS_TRACE_HOST("Calling cudaFuncSetAttribute");
#endif
// This attribute was added in CUDA 11.8.
cudaError_t status =
cudaFuncSetAttribute(
kernel_function, cudaFuncAttributeNonPortableClusterSizeAllowed, 1);
Return_Status(status);
#else
return Status::kInvalid;
#endif
}
// This is the method we expect to use going forward
static inline CUTLASS_HOST
Status launch(
dim3 const grid_dims,
dim3 const cluster_dims,
dim3 const block_dims,
size_t const smem_size,
cudaStream_t cuda_stream,
void const* kernel,
void** kernel_params) {
#if defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED)
if (check_cluster_dims(grid_dims, cluster_dims) != Status::kSuccess) {
CUTLASS_TRACE_HOST("ClusterLauncher: check_cluster_dims() failed. Aborting.");
return Status::kInvalid;
}
auto init_status = init(kernel);
if (init_status != Status::kSuccess) {
CUTLASS_TRACE_HOST("ClusterLauncher: init(kernel) failed with status " << int(init_status) << ". Aborting.");
return Status::kInvalid;
}
cudaLaunchConfig_t launch_config;
launch_config.gridDim = {grid_dims.x, grid_dims.y, grid_dims.z};
launch_config.blockDim = {block_dims.x, block_dims.y, block_dims.z};
launch_config.dynamicSmemBytes = smem_size;
launch_config.stream = cuda_stream;
cudaLaunchAttribute launch_attribute[1];
launch_attribute[0].id = cudaLaunchAttributeClusterDimension;
launch_attribute[0].val.clusterDim.x = cluster_dims.x;
launch_attribute[0].val.clusterDim.y = cluster_dims.y;
launch_attribute[0].val.clusterDim.z = cluster_dims.z;
launch_config.attrs = launch_attribute;
launch_config.numAttrs = 1;
CUTLASS_TRACE_HOST("ClusterLauncher: Launching GPC_CLUSTER_GRID GridDims = "
"(" << grid_dims.x << ", " << grid_dims.y << ", " << grid_dims.z << "), "
"And ClusterDims = "
"(" << cluster_dims.x << ", " << cluster_dims.y << ", " << cluster_dims.z << ")\n");
cudaError_t status = cudaLaunchKernelExC(&launch_config, kernel, kernel_params);
Return_Status(status);
#else
CUTLASS_TRACE_HOST("ClusterLauncher: CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED not defined! Aborting cluster launch.");
return Status::kInvalid;
#endif
}
};
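//
// Usage sketch (illustrative only): launching a kernel directly through ClusterLauncher.
// my_kernel is an assumed user kernel; most code should prefer launch_kernel_on_cluster()
// defined later in this header, which builds the parameter array automatically.
//
// @code
// __global__ void my_kernel(int const* in, int* out);
//
// cutlass::Status launch_example(int const* in, int* out, cudaStream_t stream) {
//   dim3 grid(8, 1, 1), block(128, 1, 1), cluster(2, 1, 1);
//   void const* kernel_ptr = reinterpret_cast<void const*>(&my_kernel);
//   void* kernel_params[] = { &in, &out };
//   return cutlass::ClusterLauncher::launch(
//       grid, cluster, block, /*smem_size=*/0, stream, kernel_ptr, kernel_params);
// }
// @endcode
//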
namespace detail {
template<class Arg>
void* checked_addressof(Arg&& arg) {
static_assert(! std::is_rvalue_reference_v<Arg> || ! std::is_const_v<Arg>, "You cannot take the address of a const rvalue reference (const T&&).");
// We use std::addressof to ensure we get the address,
// in case the type has an overloaded operator&.
// Note that this precludes `const T&&` references.
return const_cast<void*>(reinterpret_cast<void const*>(std::addressof(arg)));
}
} // namespace detail
//! Parameters for launch_kernel_on_cluster (see below).
struct ClusterLaunchParams {
//! Grid dimensions
dim3 grid_dims{1, 1, 1};
//! Block dimensions
dim3 block_dims{1, 1, 1};
//! Cluster dimensions
dim3 cluster_dims{1, 1, 1};
//! Number of bytes required for the kernel's shared memory.
int smem_size_in_bytes = 0;
//! CUDA stream on which to launch the kernel.
cudaStream_t cuda_stream = nullptr;
};
/// @brief Launch the kernel on the stream using cluster launch.
///
/// @param params Cluster launch parameters (see above).
/// @param kernel_ptr Pointer to the kernel function (see example).
/// @param args Zero or more arguments to pass to the kernel.
///
/// @tparam Args Types of the arguments passed to the kernel.
/// Don't specify this/these template argument(s) explicitly.
///
/// @return Status::Success on success, else an error code.
///
/// @code
/// template<class SharedMemoryType, class A, class B, class C>
/// __global__ void kernel(A a, B b, C c);
///
/// X x = get_x();
/// Y y = get_y();
/// Z z = get_z();
///
/// void const* kernel_ptr =
/// const_cast<void const*>(reinterpret_cast<void*>(
/// &kernel<SharedMemory, X, Y, Z>));
/// auto status = launch_kernel_on_cluster(
/// {grid_dims, block_dims, cluster_dims, sizeof(SharedMemory)},
/// kernel_ptr, x, y, z);
/// @endcode
template<class ... Args>
CUTLASS_HOST cutlass::Status
launch_kernel_on_cluster(const ClusterLaunchParams& params,
void const* kernel_ptr,
Args&& ... args)
{
// Unfortunately, we find ourselves needing to pass in
// the parameters as an array of raw pointers.
if constexpr (sizeof...(Args) == 0) {
return cutlass::ClusterLauncher::launch(
params.grid_dims,
params.cluster_dims,
params.block_dims,
params.smem_size_in_bytes,
params.cuda_stream,
kernel_ptr, nullptr);
}
else {
void* kernel_params[sizeof...(Args)] = {
detail::checked_addressof(std::forward<Args>(args))...
};
return cutlass::ClusterLauncher::launch(
params.grid_dims,
params.cluster_dims,
params.block_dims,
params.smem_size_in_bytes,
params.cuda_stream,
kernel_ptr,
kernel_params);
}
}
} // namespace cutlass
| include/cutlass/cluster_launch.hpp/0 | {
"file_path": "include/cutlass/cluster_launch.hpp",
"repo_id": "include",
"token_count": 3699
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Template for device-level fused activation's scale+bias+relu and Implicit GEMM Convolution
*/
#pragma once
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"
#include "cutlass/conv/convolution.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template<typename ImplicitGemmFusionKernel_>
class ImplicitGemmConvolutionFusion {
public:
using ImplicitGemmFusionKernel = ImplicitGemmFusionKernel_;
using ElementA = typename ImplicitGemmFusionKernel::ElementA;
using LayoutA = typename ImplicitGemmFusionKernel::LayoutA;
using ElementB = typename ImplicitGemmFusionKernel::ElementB;
using LayoutB = typename ImplicitGemmFusionKernel::LayoutB;
// using ElementScaleBias = typename ImplicitGemmFusionKernel::ElementScaleBias;
// using LayoutScaleBias = typename ImplicitGemmFusionKernel::LayoutScaleBias;
using ElementC = typename ImplicitGemmFusionKernel::ElementC;
using LayoutC = typename ImplicitGemmFusionKernel::LayoutC;
using ElementAccumulator = typename ImplicitGemmFusionKernel::ElementAccumulator;
using ElementCompute = typename ImplicitGemmFusionKernel::ElementCompute;
using OperatorClass = typename ImplicitGemmFusionKernel::OperatorClass;
using ArchTag = typename ImplicitGemmFusionKernel::ArchTag;
using ThreadblockShape = typename ImplicitGemmFusionKernel::ThreadblockShape;
using WarpShape = typename ImplicitGemmFusionKernel::WarpShape;
using InstructionShape = typename ImplicitGemmFusionKernel::InstructionShape;
using ThreadblockSwizzle = typename ImplicitGemmFusionKernel::ThreadblockSwizzle;
using EpilogueOutputOp = typename ImplicitGemmFusionKernel::EpilogueOutputOp;
static int const kStages = ImplicitGemmFusionKernel::kStages;
static int const kConvDim = ImplicitGemmFusionKernel::kConvDim;
using WarpMmaOperator = typename ImplicitGemmFusionKernel::WarpMmaOperator;
using ArchMmaOperator = typename ImplicitGemmFusionKernel::ArchMmaOperator;
using MathOperator = typename ImplicitGemmFusionKernel::MathOperator;
static cutlass::conv::Operator const kConvolutionalOperator = ImplicitGemmFusionKernel::kConvolutionalOperator;
static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = ImplicitGemmFusionKernel::kIteratorAlgorithm;
static int const kWarpCount =
(ThreadblockShape::kM / WarpShape::kM) *
(ThreadblockShape::kN / WarpShape::kN) *
(ThreadblockShape::kK / WarpShape::kK);
/// Argument structure
using Arguments = typename ImplicitGemmFusionKernel::Arguments;
private:
/// Kernel parameters object
typename ImplicitGemmFusionKernel::Params params_;
public:
/// Constructs Implicit GEMM
ImplicitGemmConvolutionFusion() { }
/// Determines whether the Implicit GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
// dispatch to iterators
Status status = ImplicitGemmFusionKernel::Mma::IteratorA::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
status = ImplicitGemmFusionKernel::Mma::IteratorB::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(
threadblock_swizzle.get_tiled_shape(
cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size),
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices));
if (!(grid.y <= std::numeric_limits<uint16_t>::max() &&
grid.z <= std::numeric_limits<uint16_t>::max())) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t workspace_bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size),
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices);
if(args.split_k_mode == SplitKMode::kParallel) {
// Split-K parallel: CTAs in k-dimension write the partial results in a temporary workspace.
// The user needs to call a reduction operator to obtain the final output tensor
workspace_bytes =
sizeof(ElementAccumulator) *
size_t(cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, args.problem_size)) *
size_t(grid_tiled_shape.k());
}
else if(args.split_k_mode == SplitKMode::kSerial && args.problem_size.split_k_slices > 1) {
// Split-K serial: The user workspace is used to store semaphore and serialize writing the
// final reduced output to user's output tensor
workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n());
}
return workspace_bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
if (args.problem_size.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
cudaError_t status = cudaMemsetAsync(workspace, 0, get_workspace_size(args), stream);
if (status != cudaSuccess) {
return Status::kErrorInternal;
}
}
// initialize the params structure from the arguments
params_ = typename ImplicitGemmFusionKernel::Params(
args,
static_cast<int *>(workspace)
);
int smem_size = int(sizeof(typename ImplicitGemmFusionKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<ImplicitGemmFusionKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Updates Implicit GEMM state from the arguments.
Status update(Arguments const &args, void *workspace = nullptr) {
// update the params structure from the arguments
params_.ptr_A = args.ref_A.data();
params_.ptr_B = args.ref_B.data();
params_.ptr_scale = args.ref_A_scale.data();
params_.ptr_bias = args.ref_A_bias.data();
params_.ptr_C = args.ref_C.data();
params_.ptr_D = args.ref_D.data();
params_.output_op = args.output_op;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(32 * kWarpCount, 1, 1);
int smem_size = int(sizeof(typename ImplicitGemmFusionKernel::SharedStorage));
cutlass::Kernel<ImplicitGemmFusionKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
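//
// Host-side usage sketch (illustrative only). SomeFusionKernel, make_arguments() and
// stream are placeholders; the concrete Arguments layout depends on how the fused
// kernel was composed (see the fused-convolution examples in the repository).
//
// @code
// using Conv = cutlass::conv::device::ImplicitGemmConvolutionFusion<SomeFusionKernel>;
//
// Conv conv_op;
// typename Conv::Arguments args = make_arguments(/* problem size, tensor refs, ... */);
//
// if (Conv::can_implement(args) != cutlass::Status::kSuccess) { /* handle error */ }
//
// size_t workspace_bytes = Conv::get_workspace_size(args);
// void* workspace = nullptr;
// cudaMalloc(&workspace, workspace_bytes);
//
// cutlass::Status status = conv_op(args, workspace, stream);
// @endcode
//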
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/device/implicit_gemm_convolution_fusion.h/0 | {
"file_path": "include/cutlass/conv/device/implicit_gemm_convolution_fusion.h",
"repo_id": "include",
"token_count": 3362
} | 19 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradFilterTileAccessIteratorOptimized;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorOptimized strided dgrad specialization supports
// arbitrary (non-unit) convolution strides
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Parameters structure
//
struct Params : Conv2dStridedDgradFilterIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv2dStridedDgradFilterIteratorOptimizedParams const &base):
Conv2dStridedDgradFilterIteratorOptimizedParams(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
Conv2dStridedDgradFilterIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) { }
};
private:
Conv2dStridedDgradFilterIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
uint32_t predicates_[kAccessesPerVector];
int filter_k_;
int filter_r_;
int filter_s_;
int start_r_;
int start_s_;
int64_t reset_bytes_s_;
int64_t reset_bytes_r_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided *
ThreadMap::Iterations::kContiguous < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized(
Conv2dStridedDgradFilterIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_r_(start_r),
filter_s_(start_s),
start_r_(start_r),
start_s_(start_s) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.strided();
Index column = threadblock_offset.column() + thread_coord.contiguous();
reset_bytes_s_ = (problem_size_.num_gemm_k_filter_s(start_s_) - 1) * params_.inc_next[0];
reset_bytes_r_ = reset_bytes_s_ +
(problem_size_.num_gemm_k_filter_r(start_r_) - 1) * params_.inc_next[1];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided;
int filter_c = column + c * ThreadMap::Delta::kContiguous;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
uint32_t pred = ((filter_k < problem_size_.K && (filter_c + v * AccessType::kElements) < problem_size_.C) ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_[v] |= (pred << pred_idx);
}
}
}
TensorCoord coord{filter_k_, filter_r_, filter_s_, column};
pointer_ += params_.layout(coord) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_DEVICE
void advance() {
int next_idx = 0;
LongIndex reset_bytes = params_.reset_bytes;
// Move filter_s by stride_w
filter_s_ += problem_size_.stride_w;
if (filter_s_ >= problem_size_.S) {
// Restore filter_s
filter_s_ = start_s_;
// Move filter_r by stride_h
filter_r_ += problem_size_.stride_h;
#if 0
bool check = (filter_r_ < problem_size_.R);
filter_r_ = check ? filter_r_ : start_r_;
next_idx = check ? 1 : 2;
reset_bytes += (check ? reset_bytes_s_ : reset_bytes_r_);
#else
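// Branch-free PTX equivalent of the disabled C++ block above: wraps filter_r_ back to
// start_r_ once it runs past R, selects the matching inc_next index (1: next r, 2: next
// filter-k group), and adds the corresponding reset-byte count to reset_bytes.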
asm volatile(
"{\n\t"
" .reg .pred %%p;\n\t"
" .reg .s64 t1;\n\t"
" setp.lt.s32 %%p, %3, %4;\n\t"
" selp.s32 %0, %3, %5, %%p;\n\t"
" selp.s32 %1, 1, 2, %%p;\n\t"
" selp.s64 t1, %6, %7, %%p;\n\t"
" add.s64 %2, %8, t1;\n\t"
"}\n"
: "=r"(filter_r_), "=r"(next_idx), "=l"(reset_bytes)
: "r"(filter_r_), "r"(problem_size_.R), "r"(start_r_),
"l"(reset_bytes_s_), "l"(reset_bytes_r_), "l"(reset_bytes));
#endif
}
// offset pointers by offset_bytes
pointer_ += (params_.inc_next[next_idx] - reset_bytes);
if (next_idx == 2) {
filter_k_ += params_.filter_k_delta;
}
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
predicates_[v] = (predicates_[v] & (~kClearMask));
}
}
}
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_[iteration_vector_] & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_strided;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// The unity-stride specialization of Conv2dDgradFilterTileAccessIteratorOptimized below is more
// performant for dgrad on problem sizes with stride = {1x1}
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kUnity,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Parameters structure
//
struct Params : Conv2dDgradFilterIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv2dDgradFilterIteratorOptimizedParams const &base):
Conv2dDgradFilterIteratorOptimizedParams(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
Conv2dDgradFilterIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) { }
};
private:
Conv2dDgradFilterIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
uint32_t predicates_[kAccessesPerVector];
int filter_rs_;
int filter_k_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided *
ThreadMap::Iterations::kContiguous < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized(
Conv2dDgradFilterIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_rs_(0),
filter_k_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.strided();
Index column = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided;
int filter_c = column + c * ThreadMap::Delta::kContiguous;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
uint32_t pred = ((filter_k < problem_size_.K && (filter_c + v * AccessType::kElements) < problem_size_.C) ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_[v] |= (pred << pred_idx);
}
}
}
pointer_ += (
filter_k_ * params.layout.stride()[2] + column
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
LongIndex next = params_.inc_next_rs;
// moves to the next tile
++filter_rs_;
if (filter_rs_ == params_.RS) {
filter_rs_ = 0;
next = params_.inc_next_k;
filter_k_ += params_.filter_k_delta;
}
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
predicates_[v] = (predicates_[v] & (~kClearMask));
}
}
}
pointer_ += next;
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_[iteration_vector_] & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_strided;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 7198
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief A Coord is a coordinate of arbitrary rank into a tensor or matrix
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <stdint.h>
#endif
#include "cutlass/cutlass.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically-sized array specifying Coords within a tensor
template <
int Rank_, ///< Logical rank of coordinate
typename Index_ = int, ///< Index type used for each dimension
typename LongIndex_ = int64_t ///< Long index type used for linear offsets
>
struct Coord {
public:
//
// Type and constant definitions
//
/// Number of elements in Coord
static int const kRank = Rank_;
/// Index type used to store elements
using Index = Index_;
/// Type used to represent linear offsets
using LongIndex = LongIndex_;
private:
//
// Data members
//
/// Indices
Index idx[kRank];
public:
//
// Methods
//
/// Default ctor initializes uniformly
CUTLASS_HOST_DEVICE
explicit Coord(Index value = Index(0)) {
for (int i = 0; i < kRank; ++i) {
idx[i] = value;
}
}
/// Constructs from an array of integers
CUTLASS_HOST_DEVICE
Coord(Index const (&_idx)[kRank]) {
for (int i = 0; i < kRank; ++i) {
idx[i] = _idx[i];
}
}
/// Constructs from some other Coord
template <int R, typename I, typename L>
CUTLASS_HOST_DEVICE
Coord(Coord<R, I, L> other) {
for (int i = 0; i < kRank; ++i) {
idx[i] = other[i];
}
}
/// Returns a slice of the Coord which may be larger or smaller in rank
/// than this.
template <int Slice>
CUTLASS_HOST_DEVICE
Coord<Slice, Index, LongIndex> slice(int start = 0, Index identity = 0) const {
Coord<Slice, Index, LongIndex> result;
for (int i = 0; i < Slice; ++i) {
if (i + start < kRank) {
result[i] = idx[i + start];
}
else {
result[i] = identity;
}
}
return result;
}
/// Returns the index of the dimension with least value
CUTLASS_HOST_DEVICE
int min_dim_index() const {
int i = 0;
for (int j = 1; j < kRank; ++j) {
if (idx[j] < idx[i]) {
i = j;
}
}
return i;
}
/// Returns the index of the dimension with greatest value
CUTLASS_HOST_DEVICE
int max_dim_index() const {
int i = 0;
for (int j = 1; j < kRank; ++j) {
if (idx[j] > idx[i]) {
i = j;
}
}
return i;
}
/// Returns true if Coord is non-zero.
CUTLASS_HOST_DEVICE
explicit operator bool() const {
for (int i = 0; i < kRank; ++i) {
if (idx[i]) {
return true;
}
}
return false;
}
/// Returns true if Coord is uniformly zero.
CUTLASS_HOST_DEVICE
bool operator!() const {
for (int i = 0; i < kRank; ++i) {
if (idx[i]) {
return false;
}
}
return true;
}
/// Element-wise addition
CUTLASS_HOST_DEVICE
Coord operator+(Coord const& b) const {
Coord c;
for (int i = 0; i < kRank; ++i) {
c.idx[i] = idx[i] + b.idx[i];
}
return c;
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
Coord operator-(Coord const& b) const {
Coord c;
for (int i = 0; i < kRank; ++i) {
c.idx[i] = idx[i] - b.idx[i];
}
return c;
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
Coord operator*(Coord const& b) const {
Coord c;
for (int i = 0; i < kRank; ++i) {
c.idx[i] = idx[i] * b.idx[i];
}
return c;
}
/// Element-wise division
CUTLASS_HOST_DEVICE
Coord operator/(Coord const& b) const {
Coord c;
for (int i = 0; i < kRank; ++i) {
c.idx[i] = idx[i] / b.idx[i];
}
return c;
}
/// In-place addition
CUTLASS_HOST_DEVICE
Coord& operator+=(Coord const& b) {
for (int i = 0; i < kRank; ++i) {
idx[i] += b.idx[i];
}
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
Coord& operator-=(Coord const& b) {
for (int i = 0; i < kRank; ++i) {
idx[i] -= b.idx[i];
}
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
Coord& operator*=(Coord const& b) {
for (int i = 0; i < kRank; ++i) {
idx[i] *= b.idx[i];
}
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
Coord& operator/=(Coord const& b) {
for (int i = 0; i < kRank; ++i) {
idx[i] /= b.idx[i];
}
return *this;
}
/// Member access operator
CUTLASS_HOST_DEVICE Index& operator[](int dim) { return idx[dim]; }
/// Member access operator
CUTLASS_HOST_DEVICE Index const& operator[](int dim) const { return idx[dim]; }
/// Computes the dot product with another Coord object
CUTLASS_HOST_DEVICE
LongIndex dot(Coord const& b, LongIndex sum = LongIndex(0)) const {
for (int i = 0; i < kRank; ++i) {
sum += idx[i] * b.idx[i];
}
return sum;
}
/// Gets the index of a given Coord element
template <int Dim>
CUTLASS_HOST_DEVICE Index& at() {
return idx[Dim];
}
/// Access via index; may limit unrolling potential
CUTLASS_HOST_DEVICE
Index& at(int dim) { return idx[dim]; }
/// Gets the index of a given Coord element
template <int Dim>
CUTLASS_HOST_DEVICE Index const& at() const {
return idx[Dim];
}
/// Access via index; may limit unrolling potential
CUTLASS_HOST_DEVICE
Index const& at(int dim) const { return idx[dim]; }
/// Determines if two Coord<> objects are equal
CUTLASS_HOST_DEVICE
bool operator==(Coord const& b) const {
bool equal = true;
for (int i = 0; equal && i < kRank; ++i) {
equal = (idx[i] == b.idx[i]);
}
return equal;
}
/// Not equal
CUTLASS_HOST_DEVICE
bool operator!=(Coord const& b) const { return !(*this == b); }
/// Clamps a coordinate to a range specified by maximum and minimum values
CUTLASS_HOST_DEVICE
Coord& clamp(Coord const& max, Coord const& min = Coord()) {
for (int i = 0; i < kRank; ++i) {
idx[i] = __NV_STD_MAX(__NV_STD_MIN(idx[i], max.idx[i]), min.idx[i]);
}
return *this;
}
/// Returns the sum of all elements
CUTLASS_HOST_DEVICE
Index sum() const {
Index sum_(idx[0]);
for (int i = 1; i < kRank; ++i) {
sum_ += idx[i];
}
return sum_;
}
/// Returns the product of all elements
CUTLASS_HOST_DEVICE
LongIndex product() const {
LongIndex product_(idx[0]);
for (int i = 1; i < kRank; ++i) {
product_ *= idx[i];
}
return product_;
}
/// Less than operator
CUTLASS_HOST_DEVICE
bool operator<(Coord const &b) const {
for (int i = 0; i < kRank; ++i) {
if (!(idx[i] < b[i])) {
return false;
}
}
return true;
}
/// Less than or equals operator
CUTLASS_HOST_DEVICE
bool operator<=(Coord const &b) const {
for (int i = 0; i < kRank; ++i) {
if (!(idx[i] <= b[i])) {
return false;
}
}
return true;
}
/// Greater than operator
CUTLASS_HOST_DEVICE
bool operator>(Coord const &b) const {
return !(*this <= b);
}
/// Greater than or equals operator
CUTLASS_HOST_DEVICE
bool operator>=(Coord const &b) const {
return !(*this < b);
}
};
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/// Scalar multiplication
template <int Rank, typename Index>
CUTLASS_HOST_DEVICE
Coord<Rank, Index> operator*(Index s, Coord<Rank, Index> coord) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
coord[i] *= s;
}
return coord;
}
/// Scalar multiplication
template <int Rank, typename Index>
CUTLASS_HOST_DEVICE
Coord<Rank, Index> operator*(Coord<Rank, Index> coord, Index s) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
coord[i] *= s;
}
return coord;
}
/// Scalar division
template <int Rank, typename Index>
CUTLASS_HOST_DEVICE
Coord<Rank, Index> operator/(Index s, Coord<Rank, Index> coord) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
coord[i] = s / coord[i];
}
return coord;
}
/// Scalar division
template <int Rank, typename Index>
CUTLASS_HOST_DEVICE
Coord<Rank, Index> operator/(Coord<Rank, Index> coord, Index s) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
coord[i] /= s;
}
return coord;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Integer-valued make_Coord
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to make a 1-element coordinate
template <typename T>
CUTLASS_HOST_DEVICE
Coord<1, T> make_Coord(T _0) {
T values[1] = {_0};
return Coord<1, T>(values);
}
/// Helper to make a 2-element coordinate
template <typename T>
CUTLASS_HOST_DEVICE
Coord<2, T> make_Coord(T _0, T _1) {
T values[2] = {_0, _1};
return Coord<2, T>(values);
}
/// Helper to make a 3-element coordinate
template <typename T>
CUTLASS_HOST_DEVICE
Coord<3, T> make_Coord(T _0, T _1, T _2) {
T values[3] = {_0, _1, _2};
return Coord<3, T>(values);
}
/// Helper to make a 4-element coordinate
template <typename T>
CUTLASS_HOST_DEVICE
Coord<4, T> make_Coord(T _0, T _1, T _2, T _3) {
T values[4] = {_0, _1, _2, _3};
return Coord<4, T>(values);
}
/// Helper to make a 5-element coordinate
template <typename T>
CUTLASS_HOST_DEVICE
Coord<5, T> make_Coord(T _0, T _1, T _2, T _3, T _4) {
T values[5] = {_0, _1, _2, _3, _4};
return Coord<5, T>(values);
}
/// Helper to make an N-element coordinate whose first element is _0 and whose remaining elements are zero
template <int N, typename T>
CUTLASS_HOST_DEVICE
Coord<N, T> make_Coord_with_padding(T _0) {
Coord<N, T> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = N - 1; i > 0; --i) {
coord[i] = 0;
}
coord[0] = _0;
return coord;
}
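// Illustrative usage (a comment-only sketch):
//
//   Coord<3> extent = make_Coord(2, 4, 8);            // rank-3 coordinate (2, 4, 8)
//   int64_t count = extent.product();                 // 64
//   Coord<3> shifted = extent + make_Coord(1, 1, 1);  // element-wise add -> (3, 5, 9)
//   Coord<2> mn = extent.slice<2>();                  // leading two dimensions -> (2, 4)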
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/coord.h/0 | {
"file_path": "include/cutlass/coord.h",
"repo_id": "include",
"token_count": 4608
} | 21 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Functor for performing tensor-tensor broadcasts atop existing epilogues.
    Concretely, the operation performed is the following:
UnaryOp(
BinaryOp1(
BinaryOp0(
Activation((alpha * A @ B) + bias),
beta * C0
),
beta * C1
)
)
where:
- C0 and C1 have the same extents as the output
- BinaryOp0 and BinaryOp1 perform elementwise binary operations
- UnaryOp is an elementwise operation
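      For example (illustrative only): with Activation = Identity, BinaryOp0 = BinaryOp1 = plus,
      and UnaryOp = Identity, the computation reduces to
        D = (alpha * A @ B + bias) + beta * C0 + beta * C1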
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cute/tensor.hpp"
#include "cutlass/cuda_host_adapter.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Collective epilogue that applies elementwise tensor-tensor operations atop other epilogues
///
template <
class StrideC_,
class StrideD_,
class ThreadEpilogueOp_,
class EpilogueSchedule_,
bool PerColumnBias_ = false
>
class EpilogueTensorBroadcast {
public:
//
// Type Aliases
//
using EpilogueSchedule = EpilogueSchedule_;
// derived types of output thread level operator
using ThreadEpilogueOp = ThreadEpilogueOp_;
using ElementOutput = typename ThreadEpilogueOp::ElementOutput;
using ElementAccumulator = typename ThreadEpilogueOp::ElementAccumulator;
using ElementCompute = typename ThreadEpilogueOp::ElementCompute;
using ElementScalar = ElementCompute;
using ElementBias = typename ThreadEpilogueOp::ElementBias;
using ElementC = typename ThreadEpilogueOp::ElementC;
using StrideC = StrideC_;
using ElementD = typename ThreadEpilogueOp::ElementD;
using StrideD = StrideD_;
using ActivationFunctor = typename ThreadEpilogueOp::ActivationFunctor;
static_assert(cute::rank(StrideC{}) == 3, "StrideCD must be rank-3: [M, N, L]");
static_assert(cute::rank(StrideD{}) == 3, "StrideCD must be rank-3: [M, N, L]");
static constexpr int kOutputAlignment = ThreadEpilogueOp::kCount;
using AlignmentType = typename cute::uint_bit<sizeof_bits<ElementOutput>::value * kOutputAlignment>::type;
static constexpr bool IsBinaryOp0Enabled = ThreadEpilogueOp::IsBinaryOp0Enabled;
static constexpr bool IsBinaryOp1Enabled = ThreadEpilogueOp::IsBinaryOp1Enabled;
static constexpr bool IsUnaryOpEnabled = ThreadEpilogueOp::IsUnaryOpEnabled;
static constexpr bool PerColumnBias = PerColumnBias_;
using BiasStride = typename cute::conditional_t<PerColumnBias, Stride<_0, _1, _0>, Stride<_1, _0, _0>>;
struct SharedStorage { };
// Host side epilogue arguments
struct Arguments {
typename ThreadEpilogueOp::Params thread{};
StrideC dC{};
ElementD* ptr_D = nullptr;
StrideD dD{};
ElementBias* ptr_Bias = nullptr;
ElementC* ptr_C0 = nullptr;
ElementC* ptr_C1 = nullptr;
};
// Device side epilogue params
using Params = Arguments;
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
[[maybe_unused]] ProblemShape const& _,
Arguments const& args,
[[maybe_unused]] void* workspace) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return cutlass::Status::kSuccess;
}
template <class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
[[maybe_unused]] ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
return true;
}
CUTLASS_HOST_DEVICE
EpilogueTensorBroadcast(Params const& params_)
: params(params_), epilogue_op(params_.thread) { }
CUTLASS_DEVICE
bool
is_source_needed() {
return epilogue_op.is_source0_needed() || epilogue_op.is_source1_needed();
}
template<
class ProblemShapeMNKL,
class BlockShapeMNK,
class BlockCoordMNKL,
class FrgEngine, class FrgLayout,
class TiledMma,
class ResidueMNK
>
CUTLASS_HOST_DEVICE void
operator()(
ProblemShapeMNKL problem_shape_mnkl,
BlockShapeMNK blk_shape_MNK,
BlockCoordMNKL blk_coord_mnkl,
cute::Tensor<FrgEngine, FrgLayout> const& accumulators,
TiledMma tiled_mma,
ResidueMNK residue_mnk,
int thread_idx,
[[maybe_unused]] char* smem_buf)
{
using namespace cute;
using X = Underscore;
static_assert(cute::rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4");
static_assert(is_static<BlockShapeMNK>::value, "ThreadBlock tile shape must be static");
static_assert(cute::rank(BlockShapeMNK{}) == 3, "BlockShapeMNK must be rank 3");
static_assert(cute::rank(BlockCoordMNKL{}) == 4, "BlockCoordMNKL must be rank 4");
// Separate out problem shape for convenience
auto M = get<0>(problem_shape_mnkl);
auto N = get<1>(problem_shape_mnkl);
auto L = get<3>(problem_shape_mnkl);
auto stride_c = detail::get_epilogue_stride<EpilogueSchedule>(params.dC);
auto stride_d = detail::get_epilogue_stride<EpilogueSchedule>(params.dD);
auto stride_bias = detail::get_epilogue_stride<EpilogueSchedule>(BiasStride{});
// Represent the full output tensor
Tensor mC0_mnl = make_tensor(make_gmem_ptr(params.ptr_C0), make_shape(M,N,L), stride_c); // (m,n,l)
Tensor mC1_mnl = make_tensor(make_gmem_ptr(params.ptr_C1), make_shape(M,N,L), stride_c); // (m,n,l)
Tensor mD_mnl = make_tensor(make_gmem_ptr(params.ptr_D), make_shape(M,N,L), stride_d); // (m,n,l)
Tensor mBias_mnl = make_tensor(make_gmem_ptr(params.ptr_Bias), make_shape(M,N,L), stride_bias); // (m,n,l)
Tensor gC0_mnl = local_tile(mC0_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
Tensor gC1_mnl = local_tile(mC1_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
Tensor gD_mnl = local_tile(mD_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
Tensor gBias_mnl = local_tile(mBias_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
// Slice to get the tile this thread block is responsible for
auto [m_coord, n_coord, k_coord, l_coord] = blk_coord_mnkl;
Tensor gC0 = gC0_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N)
Tensor gC1 = gC1_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N)
Tensor gD = gD_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N)
Tensor gBias = gBias_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N)
// Partition source and destination tiles to match the accumulator partitioning
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCgD = thr_mma.partition_C(gD); // (VEC,THR_M,THR_N)
Tensor tCgC0 = thr_mma.partition_C(gC0); // (VEC,THR_M,THR_N)
Tensor tCgC1 = thr_mma.partition_C(gC1); // (VEC,THR_M,THR_N)
Tensor tCgBias = thr_mma.partition_C(gBias); // (VEC,THR_M,THR_N)
static_assert(is_static<FrgLayout>::value,
"Accumulator layout must be static");
CUTE_STATIC_ASSERT_V(size(tCgC0) == size(tCgD),
"Source and destination must have the same number of elements.");
CUTE_STATIC_ASSERT_V(size(tCgC1) == size(tCgD),
"Source and destination must have the same number of elements.");
CUTE_STATIC_ASSERT_V(size(tCgD) == size(accumulators),
"Accumulator count must have the same destination element count.");
CUTE_STATIC_ASSERT_V(size(tCgBias) == size(accumulators),
"Accumulator count must have the same destination element count.");
auto cD = make_identity_tensor(make_shape(unwrap(shape<0>(gD)), unwrap(shape<1>(gD))));
Tensor tCcD = thr_mma.partition_C(cD);
bool bias_needed = params.ptr_Bias != nullptr;
bool c0_needed = (params.ptr_C0 != nullptr) && epilogue_op.is_source0_needed();
bool c1_needed = (params.ptr_C1 != nullptr) && epilogue_op.is_source1_needed();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(accumulators); ++i) {
if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) {
ElementBias bias = bias_needed ? tCgBias(i) : ElementBias(0);
ElementC c0 = c0_needed ? tCgC0(i) : ElementC(0);
ElementC c1 = c1_needed ? tCgC1(i) : ElementC(0);
tCgD(i) = epilogue_op(accumulators(i), c0, c1, bias);
}
}
}
private:
Params params;
ThreadEpilogueOp epilogue_op;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace collective
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/collective/epilogue_tensor_broadcast.hpp/0 | {
"file_path": "include/cutlass/epilogue/collective/epilogue_tensor_broadcast.hpp",
"repo_id": "include",
"token_count": 4602
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/platform/platform.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
// If kIsHeavy is a member, use it. Otherwise, assume that it's false.
namespace { // (anonymous)
template<class Op, class Enable = void>
struct kIsHeavy_member_or_false {
static constexpr bool value = false;
};
template<class Op>
struct kIsHeavy_member_or_false<Op, typename cutlass::platform::enable_if<Op::kIsHeavy>::type> {
static constexpr bool value = Op::kIsHeavy;
};
} // namespace (anonymous)
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
struct EmptyArguments {};
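// Dispatch helper: the primary template invokes op(value); the partial specialization below,
// selected via std::void_t<typename T::Arguments> when the op declares a nested Arguments type,
// stores those arguments and invokes op(value, args).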
template<class T, class = void>
struct ElementwiseOpDispatcher {
using Arguments = EmptyArguments;
T op;
CUTLASS_HOST_DEVICE
ElementwiseOpDispatcher(Arguments) {}
template <typename ValueType>
CUTLASS_HOST_DEVICE
ValueType operator()(ValueType value) {
return op(value);
}
};
template<class T>
struct ElementwiseOpDispatcher<T, std::void_t<typename T::Arguments>> {
using Arguments = typename T::Arguments;
Arguments args;
T op;
CUTLASS_HOST_DEVICE
ElementwiseOpDispatcher(Arguments args_):args(args_) {}
template <typename ValueType>
CUTLASS_HOST_DEVICE
ValueType operator()(ValueType value) {
return op(value, args);
}
};
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <
typename ElementC_,
typename ElementAccumulator_,
typename ElementCompute_,
typename ElementZ_,
typename ElementT_,
int ElementsPerAccess,
typename ElementwiseOp_ = Identity<ElementCompute_>,
typename BinaryOp_ = plus<ElementCompute_>,
bool StoreT_ = true,
typename ElementVector_ = ElementC_
>
class LinearCombinationBiasElementwise {
public:
using ElementOutput = ElementC_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementZ = ElementZ_;
using ElementT = ElementT_;
using ElementVector = ElementVector_;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kCount = kElementsPerAccess;
using ElementwiseOp = ElementwiseOp_;
using BinaryOp = BinaryOp_;
using ElementwiseOpDispatcher = detail::ElementwiseOpDispatcher<ElementwiseOp>;
using ElementwiseArguments = typename ElementwiseOpDispatcher::Arguments;
// Indicates that this epilogue applies only one binary operation
static bool const kIsSingleSource = true;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentC = Array<ElementC, kElementsPerAccess>;
using FragmentZ = Array<ElementZ, kElementsPerAccess>;
using FragmentT = Array<ElementT, kElementsPerAccess>;
// Definitions needed for collective epilogue
using FragmentSource = FragmentC;
using FragmentOutput = FragmentZ;
using ElementBias = ElementVector;
using FragmentBias = Array<ElementBias, kElementsPerAccess>;
using ActivationFunctor = ElementwiseOp;
static const ScaleType::Kind kScale = ScaleType::Default;
static bool const kIsHeavy = kIsHeavy_member_or_false<ElementwiseOp>::value;
/// If true, the 'Z' tensor is stored
static bool const kStoreZ = true;
/// If true, the 'T' tensor is stored
static bool const kStoreT = StoreT_;
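  // Per element, this functor computes
  //   T = BinaryOp(alpha * accumulator + beta * source, V)   (the beta * source term is omitted when no source is supplied)
  //   Z = ElementwiseOp(T)
  // where V is the broadcast vector. The elementwise op is skipped on all but the final
  // partition of a serial split-K reduction (see set_k_partition below).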
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
ElementwiseArguments elementwise; ///< Arguments for elementwise operation
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta,
ElementwiseArguments elementwise_ = ElementwiseArguments{}
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr), elementwise(elementwise_) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr,
ElementwiseArguments elementwise_ = ElementwiseArguments{}
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr), elementwise(elementwise_) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementwiseArguments const &elementwise_;
bool skip_elementwise_;
public:
//
// Methods
//
/// Constructor from Params
CUTLASS_HOST_DEVICE
LinearCombinationBiasElementwise(Params const ¶ms): elementwise_(params.elementwise) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
skip_elementwise_ = false;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
skip_elementwise_ = true;
}
}
/// Applies the operation when the elementwise op requires arguments and is_source_needed() is true
template <typename ElementwiseArgs>
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentC const &frag_C,
FragmentCompute const &V,
ElementwiseArgs const &elementwise_args) const {
ElementwiseOp elementwise_op;
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute tmp_C = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C);
FragmentCompute result_Z;
FragmentCompute result_T;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = binary_op(alpha_ * tmp_Accum[i] + beta_ * tmp_C[i], V[i]);
result_T[i] = z;
result_Z[i] = skip_elementwise_ ? z : elementwise_op(z, elementwise_args);
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
if constexpr (kStoreT) {
NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t;
frag_T = convert_t(result_T);
}
}
/// Applies the operation when the elementwise op requires arguments and is_source_needed() is false
template <typename ElementwiseArgs>
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentCompute const &V,
ElementwiseArgs const &elementwise_args) const {
ElementwiseOp elementwise_op;
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute result_Z;
FragmentCompute result_T;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = binary_op(alpha_ * tmp_Accum[i], V[i]);
result_T[i] = z;
result_Z[i] = skip_elementwise_ ? z : elementwise_op(z, elementwise_args);
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
if constexpr (kStoreT) {
NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t;
frag_T = convert_t(result_T);
}
}
/// Applies the operation when is_source_needed() is true
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentC const &frag_C,
FragmentCompute const &V) const {
ElementwiseOpDispatcher elementwise_op(elementwise_);
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute tmp_C = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C);
FragmentCompute result_Z;
FragmentCompute result_T;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = binary_op(alpha_ * tmp_Accum[i] + beta_ * tmp_C[i], V[i]);
result_T[i] = z;
result_Z[i] = skip_elementwise_ ? z : elementwise_op(z);
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
if constexpr (kStoreT) {
NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t;
frag_T = convert_t(result_T);
}
}
/// Applies the operation when is_source_needed() is false
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentCompute const &V) const {
ElementwiseOpDispatcher elementwise_op(elementwise_);
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute result_Z;
FragmentCompute result_T;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = binary_op(alpha_ * tmp_Accum[i], V[i]);
result_T[i] = z;
result_Z[i] = skip_elementwise_ ? z : elementwise_op(z);
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
if constexpr (kStoreT) {
NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t;
frag_T = convert_t(result_T);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_bias_elementwise.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_bias_elementwise.h",
"repo_id": "include",
"token_count": 4484
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using WMMA.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_wmma_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_thread_map_wmma_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/layout/permute.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for WMMA TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueWmmaTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapWmmaTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorWmmaTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorWmmaTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
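// Illustrative use (a sketch; ThreadblockShape, WarpMmaOp, and OutputOp are assumed to be defined
// elsewhere): kernels typically consume the nested Epilogue type, e.g.
//
//   using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWmmaTensorOp<
//       ThreadblockShape, WarpMmaOp, /*PartitionsK=*/1, OutputOp, OutputOp::kCount>::Epilogue;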
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h",
"repo_id": "include",
"token_count": 1811
} | 24 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Threadblock-level epilogue computing:
Aux = ((alpha * scale_a * scale_b) * accumulator) + ((beta * scale_c) * source) + bias
D = activation(Aux)
if Aux is fp8 type:
abs_max_output = max( abs(aux) | (for every aux in Aux))
Aux = scale_aux * Aux
endif
if D is fp8 type:
abs_max_output = max( abs(d) | (for every d in D))
D = scale_d * D
endif
Parameter Aux is optionally stored to global memory
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#include <cuda/std/utility>
#else
#include <assert.h>
#include <utility>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Helper class for keeping track of absolute maximums and performing scaling
template <
typename Iterator, // Iterator type used for storing the data for which absolute maximum and scaling
// will be computed. This type is used for predicating absolute maximum calculations.
typename Fragment, // Type of input to be computed on
bool ScalingAndAmaxNeeded // Whether to perform absolute maximum and scaling operations
>
struct ScalingAndAmaxHelper;
/// Partial specialization that does not perform scaling or calculate an absolute maximum
template <typename Iterator, typename Fragment>
struct ScalingAndAmaxHelper<Iterator, Fragment, false> {
using Element = typename Fragment::Element;
CUTLASS_HOST_DEVICE
ScalingAndAmaxHelper(Element scale) { }
CUTLASS_DEVICE
Fragment operator()(const Iterator& iterator, const Fragment& inp) {
return inp;
}
CUTLASS_HOST_DEVICE
Element get_abs_max() const {
return Element(0.);
}
CUTLASS_HOST_DEVICE
void set_scaling_factor(Element scale_) { }
};
/// Partial specialization that keeps track of an absolute maximum value of inputs seen
/// and scales inputs
template <typename Iterator, typename Fragment>
struct ScalingAndAmaxHelper<Iterator, Fragment, true> {
using Element = typename Fragment::Element;
using AccessType = typename Iterator::AccessType;
using ThreadMap = typename Iterator::ThreadMap;
Element abs_max;
Element scale;
// Operators
maximum_with_nan_propogation<Element> max_op;
absolute_value_op<Element> abs_op;
multiplies<Fragment> multiply;
CUTLASS_HOST_DEVICE
ScalingAndAmaxHelper(Element scale_) : abs_max(0.), scale(scale_) { }
// Compute the absolute maximum value between `abs_max` and the entries
// of `frag` for predicated-on entries of `iterator`. Return a scaled
// version of `inp`.
CUTLASS_DEVICE
Fragment operator()(const Iterator& iterator, const Fragment& frag) {
using PredicateGroup = Array<Element, Iterator::ThreadMap::kElementsPerAccess>;
PredicateGroup const *frag_ptr = reinterpret_cast<PredicateGroup const *>(&frag);
typename Iterator::Mask mask;
iterator.get_mask(mask);
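    // Traverse the thread map footprint (cluster / group / row / column). Only fragment entries
    // whose row is in bounds and whose column predicate is set contribute to abs_max.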
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + iterator.thread_start_row()) < iterator.extent_row());
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask.predicates[column];
if (guard) {
int access_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < PredicateGroup::kElements; ++i) {
abs_max = max_op(abs_max, abs_op(frag_ptr[access_idx][i]));
}
}
}
}
}
}
// Perform scaling
return multiply(scale, frag);
}
CUTLASS_HOST_DEVICE
Element get_abs_max() const {
return abs_max;
}
CUTLASS_HOST_DEVICE
void set_scaling_factor(Element scale_) {
scale = scale_;
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AuxOutputTileIterator_, ///< Tile iterator writing auxiliary output tensors
typename ElementVector_, ///< Data type of bias vector
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int FragmentsPerPartition = 1, ///< Used to coarsen the epilogue granularity
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(!IsEpilogueFunctorHeavy<OutputOp_>::value)
>
class EpilogueWithAbsMax :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
static bool const kIsSingleSource = true;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AuxOutputTileIterator = AuxOutputTileIterator_;
using ElementVector = ElementVector_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Data type used for absolute maximum value
using ElementAbsmax = typename OutputOp::ElementAbsmax;
/// Compute data type produced by the output op
using ElementCompute = typename OutputOp::ElementCompute;
/// Compute fragment
using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;
/// Helpers for (optionally) computing absolute maximums and scaling output and auxiliary output
using OutputScaler = detail::ScalingAndAmaxHelper<OutputTileIterator,
FragmentCompute,
OutputOp::kIsScalingAndAmaxOutputNeeded>;
using AuxOutputScaler = detail::ScalingAndAmaxHelper<AuxOutputTileIterator,
FragmentCompute,
OutputOp::kIsScalingAndAmaxAuxOutputNeeded>;
/// Thread map used by output tile iterators
using ThreadMap = typename OutputTileIterator::ThreadMap;
/// Fragment object used to store the broadcast values
using BroadcastFragment = Array<
ElementCompute,
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Data type of auxiliary output
using ElementAuxOutput = typename AuxOutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;
/// Auxiliary output access type
using AuxAccessType = Array<ElementAuxOutput, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
/// Shared memory allocation from epilogue base class
using BaseSharedStorage = typename Base::SharedStorage;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
/// Used for the broadcast
struct BroadcastDetail {
/// Number of threads per warp
static int const kWarpSize = 32;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Number of distinct scalar column indices handled by each thread
static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;
/// Number of distinct scalar row indices handled by each thread
static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;
/// Number of threads per threadblock
static int const kThreadCount = kWarpSize * WarpCount::kCount;
/// Number of distinct threads per row of output tile
static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);
/// Number of distinct threads which must be reduced during the final reduction phase within the threadblock.
static int const kThreadRows = kThreadCount / kThreadsPerRow;
/// Number of accesses each thread makes to cover one row of Shape::kN elements during the reduction
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
kThreadRows,
Shape::kN
>;
/// Debug printing
CUTLASS_DEVICE
static void print() {
#if 0
printf("BroadcastDetail {\n");
printf(
"  kColumnsPerThread: %d\n  kRowsPerThread: %d\n  kThreadCount: %d\n  kThreadsPerRow: %d\n"
"  kThreadRows: %d\n  kThreadAccessesPerRow: %d\n  StorageShape: %d x %d (count: %d)\n",
kColumnsPerThread,
kRowsPerThread,
kThreadCount,
kThreadsPerRow,
kThreadRows,
kThreadAccessesPerRow,
StorageShape::kRow,
StorageShape::kColumn,
StorageShape::kCount
);
printf("};\n");
#endif
}
};
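//
// Illustrative example (not part of the library): for a hypothetical configuration with
// Shape::kN = 128, ThreadMap::Iterations::kColumn = 1, ThreadMap::Iterations::kCount = 8,
// ThreadMap::kElementsPerAccess = 8, and WarpCount::kCount = 8 (256 threads), the
// quantities above evaluate to:
//
//   kColumnsPerThread      = 1 * 8             = 8
//   kRowsPerThread         = 8 / 1             = 8
//   kThreadCount           = 32 * 8            = 256
//   kThreadsPerRow         = 128 / 8           = 16
//   kThreadRows            = 256 / 16          = 16
//   kThreadAccessesPerRow  = max(1, ceil(128 / 256)) = 1
//   StorageShape           = MatrixShape<16, 128>    (2048 elements)
//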
/// Shared storage structure that shadows the base class allocation
struct SharedStorage {
union {
BaseSharedStorage base;
};
CUTLASS_HOST_DEVICE
SharedStorage() { }
};
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Thread index within the threadblock
int thread_idx_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithAbsMax(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.base.reference(), thread_idx),
thread_idx_(thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
AuxOutputTileIterator aux_iterator, ///< Tile iterator for destination auxiliary output
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord()) {
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
OutputScaler output_scaler(output_op.get_scale_d());
AuxOutputScaler aux_scaler(output_op.get_scale_aux());
if (!output_op.is_source_needed()) {
compute_source_not_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
aux_iterator,
output_scaler,
aux_scaler);
}
else {
compute_source_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
source_iterator,
aux_iterator,
output_scaler,
aux_scaler);
}
// Store the absolute maximum values of the output and auxiliary tensors, if needed.
if (output_op.get_ptr_output_abs_max() != nullptr) {
ElementAbsmax local_abs_max =
NumericConverter<ElementAbsmax, ElementCompute, OutputOp::kRound>{}(output_scaler.get_abs_max());
atomic_maximum<ElementAbsmax>{}(
output_op.get_ptr_output_abs_max(), local_abs_max);
}
if (output_op.get_ptr_aux_output_abs_max() != nullptr) {
ElementAbsmax local_abs_max =
NumericConverter<ElementAbsmax, ElementCompute, OutputOp::kRound>{}(aux_scaler.get_abs_max());
atomic_maximum<ElementAbsmax>{}(
output_op.get_ptr_aux_output_abs_max(), local_abs_max);
}
}
private:
CUTLASS_DEVICE
void load_broadcast_fragment_(
BroadcastFragment & broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
ElementVector const * broadcast_ptr, ///< Broadcast vector
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
broadcast_fragment.clear();
// If no pointer is supplied, set with all zeros and avoid memory accesses
if (!broadcast_ptr) {
return;
}
int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column();
int thread_column_idx = threadblock_offset.column() + thread_initial_column;
broadcast_ptr += thread_initial_column;
NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess> converter;
using AccessType = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>;
using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>;
ComputeFragmentType *frag_ptr = reinterpret_cast<ComputeFragmentType *>(&broadcast_fragment);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) {
AccessType loaded;
loaded.clear();
if (thread_column_idx < problem_size.column()) {
loaded = *reinterpret_cast<AccessType const *>(broadcast_ptr);
}
ComputeFragmentType cvt = converter(loaded);
frag_ptr[j] = cvt;
thread_column_idx += ThreadMap::Delta::kColumn;
broadcast_ptr += ThreadMap::Delta::kColumn;
}
}
template <class Seq>
struct acc2smem_source_not_needed;
template <size_t... Seq>
struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
template <int Advance>
CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
warp_tile_iterator.store(accum_fragment);
if (p < Base::kFragmentsPerIteration - 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
}
}
if (Base::kFragmentsPerIteration > 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
(1 - Base::kFragmentsPerIteration));
}
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {
(pos == (Seq * Base::kFragmentsPerIteration)) &&
(helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
AuxOutputTileIterator aux_iterator, ///< Tile iterator for destination auxiliary output
OutputScaler& output_scaler, ///< Helper for (optionally) computing the absolute maximum and scaling output
AuxOutputScaler& aux_scaler ///< Helper for (optionally) computing the absolute maximum and scaling the auxiliary output
) {
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
// CUTLASS_PRAGMA_UNROLL
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) {
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_not_needed<
cutlass::make_index_sequence<OutputTileIterator::kIterations /
Base::kFragmentsPerIteration>>::push(iter,
accum_fragment_iterator,
this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
if (p < Base::kFragmentsPerIteration - 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
}
else if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Apply output operation
//
FragmentCompute frag_Z_compute;
FragmentCompute frag_Aux_compute;
apply_output_operator_source_not_needed_(
frag_Z_compute,
frag_Aux_compute,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
//
// Conditionally store fragments
//
// (Optionally) compute the absolute maximum of frag_Z and scale frag_Z
frag_Z_compute = output_scaler(destination_iterator, frag_Z_compute);
NumericArrayConverter<typename OutputTileIterator::Fragment::Element, ElementCompute,
OutputTileIterator::Fragment::kElements> cvt_to_dst;
typename OutputTileIterator::Fragment frag_Z = cvt_to_dst(frag_Z_compute);
// Always store the output
destination_iterator.store(frag_Z);
++destination_iterator;
// Only store the auxiliary output if scaling and absolute-maximum calculation were needed
if (OutputOp::kIsScalingAndAmaxAuxOutputNeeded) {
frag_Aux_compute = aux_scaler(aux_iterator, frag_Aux_compute);
NumericArrayConverter<typename AuxOutputTileIterator::Fragment::Element, ElementCompute,
AuxOutputTileIterator::Fragment::kElements> cvt_to_aux;
typename AuxOutputTileIterator::Fragment frag_Aux = cvt_to_aux(frag_Aux_compute);
aux_iterator.store(frag_Aux);
++aux_iterator;
}
}
if (Base::kFragmentsPerIteration > 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
}
}
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
AuxOutputTileIterator aux_iterator, ///< Tile iterator for destination auxiliary output
OutputScaler& output_scaler, ///< Helper for (optionally) computing the absolute maximum and scaling output
AuxOutputScaler& aux_scaler ///< Helper for (optionally) computing the absolute maximum and scaling the auxiliary output
) {
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator.load(source_fragment);
++source_iterator;
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1, perform a reduction amongst the k-slices
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Apply output operation
//
FragmentCompute frag_Z_compute;
FragmentCompute frag_Aux_compute;
apply_output_operator_(
frag_Z_compute,
frag_Aux_compute,
output_op,
aligned_accum_fragment[0],
source_fragment,
broadcast_fragment);
//
// Conditionally store fragments
//
// (Optionally) compute the absolute maximum of frag_Z and scale frag_Z
frag_Z_compute = output_scaler(destination_iterator, frag_Z_compute);
NumericArrayConverter<typename OutputTileIterator::Fragment::Element, ElementCompute,
OutputTileIterator::Fragment::kElements> cvt_to_dst;
typename OutputTileIterator::Fragment frag_Z = cvt_to_dst(frag_Z_compute);
// Always store the output
destination_iterator.store(frag_Z);
++destination_iterator;
// Only store the auxiliary output if scaling and absolute-maximum calculation were needed
if (OutputOp::kIsScalingAndAmaxAuxOutputNeeded) {
frag_Aux_compute = aux_scaler(aux_iterator, frag_Aux_compute);
NumericArrayConverter<typename AuxOutputTileIterator::Fragment::Element, ElementCompute,
AuxOutputTileIterator::Fragment::kElements> cvt_to_aux;
typename AuxOutputTileIterator::Fragment frag_Aux = cvt_to_aux(frag_Aux_compute);
aux_iterator.store(frag_Aux);
++aux_iterator;
}
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
FragmentCompute &frag_Z,
FragmentCompute &frag_Aux,
OutputOp &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
typename OutputTileIterator::Fragment const &frag_C,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<ElementCompute, kElementsPerAccess>;
using AccessTypeAux = Array<ElementCompute, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeAux *frag_Aux_ptr = reinterpret_cast<AccessTypeAux *>(&frag_Aux);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
OutputAccessType const *frag_C_ptr =
reinterpret_cast<OutputAccessType const *>(&frag_C);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_Aux_ptr[i],
frag_AB_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn],
frag_C_ptr[i]);
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
FragmentCompute &frag_Z,
FragmentCompute &frag_Aux,
OutputOp &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<ElementCompute, kElementsPerAccess>;
using AccessTypeAux = Array<ElementCompute, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeAux *frag_Aux_ptr = reinterpret_cast<AccessTypeAux *>(&frag_Aux);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_Aux_ptr[i],
frag_AB_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
};
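//
// Usage sketch (illustrative only; the Epilogue alias, shared_storage, output_op, and the
// iterators are assumed to be defined by the enclosing kernel):
//
//   Epilogue epilogue(shared_storage, thread_idx, warp_idx, lane_idx);
//
//   // Runs the output operator, optionally tracks the absolute maximum of the output and
//   // auxiliary output, and atomically folds the per-threadblock maxima into the pointers
//   // exposed by the output operator.
//   epilogue(
//     output_op,              // EpilogueWithAbsMax::OutputOp
//     broadcast_ptr,          // pointer to ElementVector bias/broadcast vector (may be nullptr)
//     destination_iterator,   // OutputTileIterator for D
//     accumulators,           // AccumulatorTile
//     source_iterator,        // OutputTileIterator for C
//     aux_iterator,           // AuxOutputTileIterator
//     problem_size_mn,        // MatrixCoord extent of the output
//     threadblock_offset);    // MatrixCoord offset of this threadblock
//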
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_with_absmax.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_with_absmax.h",
"repo_id": "include",
"token_count": 12927
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Parameter structures for predicated tile iterators addressing output tensors with affine rank-N layouts.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/fast_math.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Rank
>
struct PredicatedTileIteratorAffineLayoutRankNParams {
using Layout = layout::AffineRankN<Rank>;
using TensorCoord = typename Layout::TensorCoord;
static bool const kBigEndian = false;
//
// Data members
//
Layout layout;
/// Stride in units of bytes along M modes
Coord<Layout::kRank/2, typename Layout::LongIndex> stride_m;
/// Stride in units of bytes along N modes
Coord<Layout::kRank/2, typename Layout::LongIndex> stride_n;
/// Fast divmod objects constructed from the tensor extents along the M modes
FastDivmod divmod_m[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)];
/// Fast divmod objects constructed from the tensor extents along the N modes
FastDivmod divmod_n[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)];
int64_t rank2_inc_col;
int64_t rank2_inc_row;
//
// Methods
//
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineLayoutRankNParams() { }
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineLayoutRankNParams(TensorCoord const &extent,
Layout const &layout_,
int64_t element_sizeof_bits)
: layout(layout_)
{
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2; ++i) {
stride_m[i] = OffsetBytes(layout_.stride()[i], element_sizeof_bits);
stride_n[i] = OffsetBytes(layout_.stride()[i + Layout::kRank / 2], element_sizeof_bits);
}
if (kBigEndian) {
// "Big Endian" scheme
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
divmod_m[i] = FastDivmod(extent[i + 1]);
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2 + 1]);
}
}
else {
// "Little Endian" scheme
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
divmod_m[i] = FastDivmod(extent[i]);
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2]);
}
}
#if 0
//
// Debug print statements to verify extents and strides are passed correctly.
//
printf("PredicatedTileIteratorAffine::Params() entered\n");
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank; ++i) {
printf(" extent[%d]: %d\n", i, extent[i]);
}
for (int i = 0; i < Layout::kRank; ++i) {
printf(" stride[%d]: %ld\n", i, layout_.stride()[i]);
}
printf("PredicatedTileIteratorAffine::Params() returning\n");
#endif
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineLayoutRankNParams(Layout const &layout_,
int32_t threadmap_delta_kColumn,
int32_t threadmap_delta_kRow,
int64_t element_sizeof_bits)
: layout(layout_)
{
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2; ++i) {
stride_m[i] = OffsetBytes(layout_.stride()[i], element_sizeof_bits);
stride_n[i] = OffsetBytes(layout_.stride()[i + Layout::kRank / 2], element_sizeof_bits);
}
rank2_inc_col = threadmap_delta_kColumn * stride_n[0];
rank2_inc_row = threadmap_delta_kRow * stride_m[0];
}
};
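//
// Illustrative example (not part of the library): for Rank = 4 with the "Little Endian"
// scheme above, a hypothetical float tensor (element_sizeof_bits = 32) with
// extent = (E0, E1, E2, E3) and strides (S0, S1, S2, S3) yields
//
//   stride_m[0] = S0 * 4 bytes,  stride_m[1] = S1 * 4 bytes    // M modes
//   stride_n[0] = S2 * 4 bytes,  stride_n[1] = S3 * 4 bytes    // N modes
//   divmod_m[0] = FastDivmod(E0),  divmod_n[0] = FastDivmod(E2)
//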
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine_layout_params.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine_layout_params.h",
"repo_id": "include",
"token_count": 2047
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue.
These quantities assume a 'column-major' arrangement of SimtOp instructions, of which
a row-oriented slice is visible per iteration.
*/
#pragma once
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator, ///< matrix multiply operation (concept: arch::Mma)
typename Layout, ///< destination layout in shared memory
typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
struct SimtPolicy;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: MatrixShape)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
struct SimtPolicy<WarpShape_, Operator_, layout::RowMajor, MmaSimtPolicy_> {
using WarpShape = WarpShape_;
using Operator = Operator_;
using MmaSimtPolicy = MmaSimtPolicy_;
static_assert(!(WarpShape::kM % MmaSimtPolicy::WarpShape::kRow), "Divisibility");
static_assert(!(WarpShape::kN % MmaSimtPolicy::WarpShape::kColumn), "Divisibility");
/// Number of iterations
static int const kIterations = WarpShape::kM / MmaSimtPolicy::WarpShape::kRow;
/// Number of accumulators written per iteration
static int const kElementsPerIteration =
(WarpShape::kN / MmaSimtPolicy::WarpShape::kColumn);
/// Total number of accumulators
static int const kAccumulatorElementCount = kElementsPerIteration * kIterations;
/// Number of consecutive elements
static int const kElementsPerAccess = MmaSimtPolicy::LaneMmaShape::kN;
/// Number of rows per epilogue iteration
static int const kRowsPerIteration = MmaSimtPolicy::WarpShape::kRow;
/// Number of accesses made in one iteration
static int const kAccessesPerIteration = kElementsPerIteration / kElementsPerAccess;
/// Number of elements in between accumulator chunks of (LaneMmaShape::kM x LaneMmaShape::kN)
using Delta = MatrixShape<
MmaSimtPolicy::WarpShape::kRow * MmaSimtPolicy::LaneMmaShape::kM,
MmaSimtPolicy::WarpShape::kColumn * MmaSimtPolicy::LaneMmaShape::kN
>;
};
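//
// Illustrative example (not part of the library): for a hypothetical warp-level GEMM with
// WarpShape = GemmShape<64, 64, 8> and an MmaSimtPolicy whose lane arrangement is
// MatrixShape<4, 8> with LaneMmaShape = GemmShape<4, 4, 1>, the policy evaluates to:
//
//   kIterations               = 64 / 4   = 16
//   kElementsPerIteration     = 64 / 8   = 8
//   kAccumulatorElementCount  = 8 * 16   = 128
//   kElementsPerAccess        = 4
//   kRowsPerIteration         = 4
//   kAccessesPerIteration     = 8 / 4    = 2
//   Delta                     = MatrixShape<4 * 4, 8 * 4> = MatrixShape<16, 32>
//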
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/simt_policy.h/0 | {
"file_path": "include/cutlass/epilogue/warp/simt_policy.h",
"repo_id": "include",
"token_count": 1339
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level Rank 2k definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/rank_2k_universal.h"
#include "cutlass/gemm/kernel/default_rank_2k.h"
#include "cutlass/gemm/kernel/default_rank_2k_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by SYRK
typename Operator,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_ = BlasMode::kSymmetric,
///
typename Enable = void
>
struct DefaultRank2KUniversal;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued Rank 2k update kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by Rank2k
typename Operator>
struct DefaultRank2KUniversal<
ElementA,
LayoutA,
ComplexTransform::kNone, // transform A
kAlignmentA,
ElementB,
LayoutB,
ComplexTransform::kNone, // transform B
kAlignmentB,
ElementC,
LayoutC,
FillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
BlasMode::kSymmetric,
typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultRank2Kkernel = typename kernel::DefaultRank2K<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
FillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
BlasMode::kSymmetric
>::Rank2Kkernel;
/// Define the kernel in terms of the default kernel
using Rank2Kkernel = kernel::Rank2KUniversal<
typename DefaultRank2Kkernel::Mma1,
typename DefaultRank2Kkernel::Mma2,
typename DefaultRank2Kkernel::Epilogue,
ThreadblockSwizzle,
FillModeC,
BlasMode::kSymmetric
>;
};
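//
// Instantiation sketch (illustrative only; the concrete element types, tile shapes, stage count,
// and epilogue below are assumptions rather than recommended values):
//
//   using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KUniversal<
//     double, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 1,
//     double, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 1,
//     double, cutlass::layout::ColumnMajor, cutlass::FillMode::kLower,
//     double,
//     cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<64, 64, 16>,
//     cutlass::gemm::GemmShape<32, 32, 16>,
//     cutlass::gemm::GemmShape<8, 8, 4>,
//     cutlass::epilogue::thread::LinearCombination<double, 1, double, double>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     4,      // Stages
//     false,  // SplitKSerial
//     cutlass::arch::OpMultiplyAdd
//   >::Rank2Kkernel;
//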
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued Rank 2K update kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by SYRK
typename Operator,
// BlasMode
BlasMode kBlasMode
>
struct DefaultRank2KUniversal<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
ElementC,
LayoutC,
FillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
kBlasMode,
typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultRank2Kkernel = typename kernel::DefaultRank2KComplex<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
FillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
TransformA,
TransformB,
Operator,
SplitKSerial,
kBlasMode
>::Rank2Kkernel;
/// Define the kernel in terms of the default kernel
using Rank2Kkernel = kernel::Rank2KUniversal<
typename DefaultRank2Kkernel::Mma1,
typename DefaultRank2Kkernel::Mma2,
typename DefaultRank2Kkernel::Epilogue,
ThreadblockSwizzle,
FillModeC,
kBlasMode
>;
};
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/default_rank_2k_universal.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_rank_2k_universal.h",
"repo_id": "include",
"token_count": 3345
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Universal GEMM kernel that additionally computes a reduction of one operand over the GEMM K dimension.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename EpilogueGemmKReduction_, ///! Epilogue for the GEMM-K reduction output
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmWithKReduction {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using EpilogueGemmKReduction = EpilogueGemmKReduction_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using LayoutGemmKReduction = cutlass::layout::PitchLinear;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
static int const kReduceKForA = Mma::kReduceKForA;
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
void * ptr_gemm_k_reduction;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_gemm_k_reduction;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc;
typename LayoutC::Stride::Index ldd;
typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction;
//
// Methods
//
Arguments() :
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
ptr_gemm_k_reduction(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
void * ptr_gemm_k_reduction,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
int64_t batch_stride_gemm_k_reduction,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd,
typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_gemm_k_reduction(ptr_gemm_k_reduction),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_gemm_k_reduction(batch_stride_gemm_k_reduction),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ld_gemm_k_reduction(ld_gemm_k_reduction)
{
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
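//
// Host-side construction sketch (illustrative only; the pointer, stride, and scalar names are
// placeholders supplied by the caller):
//
//   typename GemmWithKReduction::Arguments args(
//     cutlass::gemm::GemmUniversalMode::kGemm,
//     {M, N, K},                        // problem size
//     1,                                // batch count / split-K slices
//     {alpha, beta},                    // epilogue output-op parameters
//     ptr_A, ptr_B, ptr_C, ptr_D,
//     ptr_gemm_k_reduction,
//     batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D,
//     batch_stride_gemm_k_reduction,
//     lda, ldb, ldc, ldd, ld_gemm_k_reduction);
//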
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
void * ptr_gemm_k_reduction;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_gemm_k_reduction;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda),
params_B(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_gemm_k_reduction(args.batch_stride_gemm_k_reduction),
ptr_D(args.ptr_D),
ptr_gemm_k_reduction(args.ptr_gemm_k_reduction)
{}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversal::Params::Params() - problem_size: " << this->problem_size);
if (this->mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D = workspace;
ptr_gemm_k_reduction = static_cast<uint8_t *>(workspace)
+ sizeof(ElementC) * size_t(this->batch_stride_D) * size_t(this->grid_tiled_shape.k());
return Status::kSuccess;
}
return ParamsBase::init_workspace(workspace, stream);
}
/// Returns the workspace size (in bytes) needed for this problem geometry
size_t get_workspace_size() const
{
size_t workspace_bytes = ParamsBase::get_workspace_size();
if (this->mode == GemmUniversalMode::kGemmSplitKParallel)
{
// Split-K parallel always requires a temporary workspace
workspace_bytes +=
sizeof(ElementC) *
size_t(batch_stride_gemm_k_reduction) *
size_t(this->grid_tiled_shape.k());
}
return workspace_bytes;
}
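//
// Worked example (illustrative only): in split-K parallel mode with ElementC = float,
// batch_stride_gemm_k_reduction = 4096, and grid_tiled_shape.k() = 4, this adds
// 4 * 4096 * 4 = 65536 bytes for the partial reduction vectors on top of the base
// workspace reported by ParamsBase.
//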
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
ptr_gemm_k_reduction = args.ptr_gemm_k_reduction;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
batch_stride_gemm_k_reduction = args.batch_stride_gemm_k_reduction;
this->batch_stride_D = args.batch_stride_D;
output_op = args.epilogue;
CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand A");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand B");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand C");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
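//
// Illustrative example (not part of the library): with LayoutA = RowMajor and
// kAlignmentA = 8, a problem with k() = 1000 is accepted (1000 % 8 == 0), whereas
// k() = 1002 returns Status::kErrorMisalignedOperand for operand A.
//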
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmWithKReduction op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
typename Mma::FragmentReduction gemm_k_accumulators;
gemm_k_accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators,
gemm_k_accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
ElementC *ptr_gemm_k_reduction = static_cast<ElementC *>(params.ptr_gemm_k_reduction);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
ptr_gemm_k_reduction += threadblock_tile_offset.k() * params.batch_stride_gemm_k_reduction;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
if ((kReduceKForA && threadblock_tile_offset.n() == 0)
|| (!kReduceKForA && threadblock_tile_offset.m() == 0)) {
int warp_idx_mn = warp_idx % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Mma::Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Mma::Base::WarpCount::kM;
if ((kReduceKForA && warp_idx_n == 0)
|| (!kReduceKForA && warp_idx_m == 0)) {
int reduction_warp_idx = kReduceKForA ? warp_idx_m : warp_idx_n;
int reduction_threadblock_offset = kReduceKForA ? threadblock_tile_offset.m() :
threadblock_tile_offset.n();
int reduction_vector_size = kReduceKForA ? params.problem_size.m()
: params.problem_size.n();
EpilogueGemmKReduction epilogue_gemm_k_reduction(thread_idx,
reduction_warp_idx,
lane_idx,
reduction_threadblock_offset,
ptr_gemm_k_reduction);
epilogue_gemm_k_reduction(
reduction_vector_size,
gemm_k_accumulators,
params.mode == GemmUniversalMode::kGemm
&& (params.grid_tiled_shape.k() > 1)
&& (threadblock_tile_offset.k() > 0));
}
}
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
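// A rough sketch of the serial split-K ordering implemented above (kGemm mode with
// grid_tiled_shape.k() > 1), written as pseudocode for a single output tile, where
// k = threadblock_tile_offset.k() and grid_k = grid_tiled_shape.k():
//
//   if (k > 0) iterator_C = iterator_D;              // prior partial sums are read back from D
//   semaphore.wait(k);                               // block until slice k-1 released lock value k
//   epilogue(output_op, iterator_D, accumulators, iterator_C);
//   semaphore.release(k + 1 == grid_k ? 0 : k + 1);  // last slice resets the lock for reuse
//
// so the K slices of one output tile update D strictly in increasing order of k.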
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_with_k_reduction.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_with_k_reduction.h",
"repo_id": "include",
"token_count": 10014
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles all packed matrix layouts
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: layout::MapFunc)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: layout::MapFunc)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: layout::MapFunc)
typename LayoutC_,
/// Operator used to compute GEMM
typename Operator_
>
struct MmaGeneric {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = ElementA_;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = LayoutA_;
/// Data type of operand B
using ElementB = ElementB_;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = LayoutB_;
/// Element type of operand C
using ElementC = ElementC_;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = Operator_;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Instruction
using MmaOp = arch::Mma<
gemm::GemmShape<1,1,1>,
1,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
Operator>;
static bool const kMultipleOf2 = ((Shape::kM % 2 == 0) && (Shape::kN % 2 == 0));
static bool const kAllFp32 = platform::is_same<ElementA, float>::value &&
platform::is_same<ElementB, float>::value &&
platform::is_same<ElementC, float>::value;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementA const, LayoutA> a_ref(
reinterpret_cast<ElementA const *>(&A), LayoutA::packed({Shape::kM, Shape::kK}));
TensorRef<ElementB const, LayoutB> b_ref(
reinterpret_cast<ElementB const *>(&B), LayoutB::packed({Shape::kK, Shape::kN}));
TensorRef<ElementC, LayoutC> d_ref(
reinterpret_cast<ElementC *>(&D), LayoutC::packed(make_Coord(Shape::kM, Shape::kN)));
MmaOp mma_op;
// Copy accumulators
D = C;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK; ++k) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 860)
if (kMultipleOf2 && kAllFp32) {
// 2x2 zigzag: the m and n loops advance by 2, and the loop body issues the four multiply-adds of a 2x2 tile.
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; n+=2) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; m+=2) {
int m_serpentine = (n % 4) ? (Shape::kM - 2 - m) : m;
//top-left element in 2x2 tile
{
MatrixCoord mn(m_serpentine, n);
MatrixCoord mk(m_serpentine, k);
MatrixCoord kn(k, n);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
mma_op(d, a, b, d);
d_ref.at(mn) = d[0];
}
//bottom-left element in 2x2 tile
{
MatrixCoord mn(m_serpentine+1, n);
MatrixCoord mk(m_serpentine+1, k);
MatrixCoord kn(k, n);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
mma_op(d, a, b, d);
d_ref.at(mn) = d[0];
}
//bottom-right element in 2x2 tile
{
MatrixCoord mn(m_serpentine+1, n+1);
MatrixCoord mk(m_serpentine+1, k);
MatrixCoord kn(k, n+1);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
mma_op(d, a, b, d);
d_ref.at(mn) = d[0];
}
//top-right element in 2x2 tile
{
MatrixCoord mn(m_serpentine, n+1);
MatrixCoord mk(m_serpentine, k);
MatrixCoord kn(k, n+1);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
mma_op(d, a, b, d);
d_ref.at(mn) = d[0];
}
}
}
} else
#endif
{
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
int m_serpentine = (n % 2) ? (Shape::kM - 1 - m) : m;
MatrixCoord mn(m_serpentine, n);
MatrixCoord mk(m_serpentine, k);
MatrixCoord kn(k, n);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
mma_op(d, a, b, d);
d_ref.at(mn) = d[0];
}
}
}
}
}
};
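// A minimal usage sketch of the thread-level operator defined above (the shape and data values
// are illustrative only; in real kernels the fragments are produced by warp/threadblock tile
// iterators rather than filled by hand):
//
//   using ThreadMma = cutlass::gemm::thread::MmaGeneric<
//       cutlass::gemm::GemmShape<4, 4, 4>,
//       float, cutlass::layout::RowMajor,       // A
//       float, cutlass::layout::ColumnMajor,    // B
//       float, cutlass::layout::RowMajor,       // C
//       cutlass::arch::OpMultiplyAdd>;
//
//   ThreadMma::FragmentA frag_A;   // Shape::kMK = 16 elements
//   ThreadMma::FragmentB frag_B;   // Shape::kKN = 16 elements
//   ThreadMma::FragmentC accum;    // Shape::kMN = 16 elements
//   frag_A.fill(1.0f);
//   frag_B.fill(2.0f);
//   accum.clear();
//
//   ThreadMma mma;
//   mma(accum, frag_A, frag_B, accum);   // accum = A * B + accum; every element becomes 8.0f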
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Matrix multiply-add operation - assumes operand B is not changing
struct MmaComplexF32_Column {
using Shape = gemm::GemmShape<1, 1, 1>;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
}
};
/// Matrix multiply-add operation - assumes operand A is not changing
struct MmaComplexF32_Corner {
using Shape = gemm::GemmShape<1, 1, 1>;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
}
};
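// Both helpers above compute the same complex multiply-accumulate, decomposed into four real
// fused multiply-adds:
//
//   d.real() = a.real() * b.real() - a.imag() * b.imag() + c.real()
//   d.imag() = a.real() * b.imag() + a.imag() * b.real() + c.imag()
//
// They differ only in the order the four FMAs are issued (keeping either operand b or operand a
// fixed across consecutive instructions). Note that the caller below passes the accumulator
// fragment as both 'd' and 'c', so reading a partially updated component of d on the right-hand
// side is well defined.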
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles all packed matrix layouts
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: layout::MapFunc)
typename LayoutA_,
/// Layout of B matrix (concept: layout::MapFunc)
typename LayoutB_,
/// Layout of C matrix (concept: layout::MapFunc)
typename LayoutC_
>
struct MmaGeneric<
Shape_,
complex<float>,
LayoutA_,
complex<float>,
LayoutB_,
complex<float>,
LayoutC_,
arch::OpMultiplyAdd> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = complex<float>;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = LayoutA_;
/// Data type of operand B
using ElementB = complex<float>;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = LayoutB_;
/// Element type of operand C
using ElementC = complex<float>;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Instruction
using MmaOp = arch::Mma<
gemm::GemmShape<1,1,1>,
1,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
Operator>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementA const, LayoutA> a_ref(
reinterpret_cast<ElementA const *>(&A), LayoutA::packed({Shape::kM, Shape::kK}));
TensorRef<ElementB const, LayoutB> b_ref(
reinterpret_cast<ElementB const *>(&B), LayoutB::packed({Shape::kK, Shape::kN}));
TensorRef<ElementC, LayoutC> d_ref(
reinterpret_cast<ElementC *>(&D), LayoutC::packed(make_Coord(Shape::kM, Shape::kN)));
detail::MmaComplexF32_Column mma_column;
detail::MmaComplexF32_Corner mma_corner;
// Copy accumulators
D = C;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
int m_serpentine = (n % 2) ? (Shape::kM - 1 - m) : m;
MatrixCoord mn(m_serpentine, n);
MatrixCoord mk(m_serpentine, k);
MatrixCoord kn(k, n);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
if ((m == 0 && n) || m == Shape::kM - 1) {
mma_corner(d, a, b, d);
}
else {
mma_column(d, a, b, d);
}
d_ref.at(mn) = d[0];
}
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles conventional layouts for FFMA and DFMA GEMM
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: layout::MapFunc)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: layout::MapFunc)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: layout::MapFunc)
typename LayoutC_
>
struct Mma<
Shape_,
ElementA_,
LayoutA_,
ElementB_,
LayoutB_,
ElementC_,
LayoutC_,
arch::OpMultiplyAdd,
bool> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = ElementA_;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = LayoutA_;
/// Data type of operand B
using ElementB = ElementB_;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = LayoutB_;
/// Element type of operand C
using ElementC = ElementC_;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename MmaGeneric<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
Operator>::MmaOp;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
MmaGeneric<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
Operator> mma;
mma(D, A, B, C);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/thread/mma_sm50.h/0 | {
"file_path": "include/cutlass/gemm/thread/mma_sm50.h",
"repo_id": "include",
"token_count": 6595
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
It loads the two loop-invariant vectors, mean and var, in the prologue and
keeps them in the register file. In the mainloop, it loads the two
loop-variant vectors, gamma and beta, using cp.async. An elementwise
operation applies var, mean, gamma, and beta between the ldmatrix loads and
the warp-level MMA.
*/
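// For intuition, a rough per-element sketch of the fused transform applied to each A fragment
// between the shared-memory load and the warp-level MMA. The exact arithmetic is defined by
// cutlass::gemm::warp::LayernormScaleBiasTransform; interpreting the var/mean fragment as a
// precomputed inverse standard deviation plus mean is an assumption made for illustration only:
//
//   for (int i = 0; i < FragmentA::kElements; ++i) {
//     frag_A[i] = (frag_A[i] - mean) * inv_std * gamma + beta;
//   }
//
// where mean / inv_std come from the var_mean fragment loaded in the prologue and gamma / beta
// come from the gamma_beta fragment staged through shared memory.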
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h"
#include "cutlass/gemm/threadblock/mma_base.h"
#include "cutlass/gemm/warp/layernorm_scale_bias_transform.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Element type of scale and bias vectors
typename ElementScaleBias_,
/// Layout of scale and bias vectors
typename LayoutScaleBias_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// WarpIterator to load Scale or Bias vector from the shared memory
typename WarpIteratorGammaBeta_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class MmaMainloopFusionBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Element type of scale and bias vectors
using ElementScaleBias = ElementScaleBias_;
/// Layout of scale and bias vectors
using LayoutScaleBias = LayoutScaleBias_;
///< Policy describing tuning details
using Policy = Policy_;
///< WarpIterator to load Scale or Bias vector from the shared memory
using WarpIteratorGammaBeta = WarpIteratorGammaBeta_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
/// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
/// Number of stages
static int const kStages = Stages;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
/// Tensor reference to the scale and bias vectors
using TensorRefGammaBeta = TensorRef<ElementScaleBias, LayoutScaleBias>;
/// Tensor reference to the B operand
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK * kStages +
Policy::SmemPaddingA::kColumn>;
/// Shape of the A scale and bias vectors in shared memory
using ShapeGammaBeta =
MatrixShape<1 + Policy::SmemPaddingA::kRow,
2 * Shape::kK * kStages + Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB =
MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
/// Buffer for A operand Scale and Bias
AlignedBuffer<ElementScaleBias, ShapeGammaBeta::kCount> operand_A_gamma_beta;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator::LayoutA LayoutA() {
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a layout object for the A scale and bias vectors
CUTLASS_DEVICE
static LayoutScaleBias LayoutScaleBias() {
return LayoutScaleBias::packed(
{ShapeGammaBeta::kRow, ShapeGammaBeta::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() {
return TensorRefB{operand_B.data(), LayoutB()};
}
/// Returns a TensorRef to the A operand Scale vector
CUTLASS_HOST_DEVICE
TensorRefGammaBeta operand_A_gamma_beta_ref() {
return TensorRefGammaBeta{operand_A_gamma_beta.data(), LayoutScaleBias()};
}
};
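// As a concrete illustration of the shared-memory footprint above (padding terms omitted, and
// the numbers are illustrative only): with Shape = <128, 128, 32> and kStages = 3, ShapeA is
// 128 x 96 elements of ElementA, ShapeB is 96 x 128 elements of ElementB, and ShapeGammaBeta is
// 1 x 192 elements of ElementScaleBias (both gamma and beta are staged, hence the factor of 2).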
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of A operand scale and bias vector
/// from shared memory
WarpIteratorGammaBeta warp_tile_iterator_A_gamma_beta_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaMainloopFusionBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_A_gamma_beta_(
shared_storage.operand_A_gamma_beta_ref(), lane_idx),
warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {}
};
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Iterates over vectors of var and mean vector in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorVarMean_,
/// Iterates over vectors of scale and bias vector in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorGammaBeta_,
/// Iterates over vectors of scale and bias vector in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorGammaBeta_,
/// Cache operation for scale/bias operand
cutlass::arch::CacheOperation::Kind CacheOpGammaBeta,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// WarpIterator to load Scale or Bias vector from the shared memory
typename WarpIteratorGammaBeta_,
/// Number of stages,
int Stages,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Used for partial specialization
typename Enable = bool>
class MmaLayernormMainloopFusionMultistage :
public MmaMainloopFusionBase<Shape_, typename IteratorGammaBeta_::Element,
typename IteratorGammaBeta_::Layout, Policy_, WarpIteratorGammaBeta_, Stages> {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Iterates over tiles of the var and mean vectors in global memory
using IteratorVarMean = IteratorVarMean_;
///< Iterates over tiles of the scale and bias vectors in global memory
using IteratorGammaBeta = IteratorGammaBeta_;
///< WarpIterator to load Scale or Bias vector from the shared memory
using WarpIteratorGammaBeta = WarpIteratorGammaBeta_;
///< Policy describing tuning details
using Policy = Policy_;
///< Base class
using Base = MmaMainloopFusionBase<Shape_, typename IteratorGammaBeta::Element,
typename IteratorGammaBeta::Layout, Policy,
WarpIteratorGammaBeta, Stages>;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using SmemIteratorGammaBeta = SmemIteratorGammaBeta_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static cutlass::arch::CacheOperation::Kind const kCacheOpGammaBeta =
CacheOpGammaBeta;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
};
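// As a worked example of the quantities above (numbers are illustrative only): if one stage of
// operand A needs AsyncCopyIterationsPerStageA = 8 cp.async instructions and the warp performs
// kWarpGemmIterations = 4 warp-level MMAs per stage, then
// kAccessesPerGroupA = (8 + 4 - 1) / 4 = 2, i.e. up to two global->shared copies are grouped
// with each warp-level MMA iteration so the copies for a stage are spread across that stage.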
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
using WarpLoadedFragmentVarMean = typename IteratorVarMean::Fragment;
using WarpLoadedFragmentGammaBeta =
typename WarpIteratorGammaBeta::Fragment;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of A operand scale vector to shared memory
SmemIteratorGammaBeta smem_iterator_A_gamma_beta_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
int warp_idx_m_;
int warp_idx_n_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaLayernormMainloopFusionMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_A_gamma_beta_(shared_storage.operand_A_gamma_beta_ref(),
thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
warp_idx_m_ = warp_idx_mn % Base::WarpCount::kM;
warp_idx_n_ = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_A_gamma_beta_.add_tile_offset(
{warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n_});
}
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A,
IteratorGammaBeta &iterator_A_gamma_beta,
IteratorB &iterator_B,
int group_start_A = 0, int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
}
++iterator_A;
}
++this->smem_iterator_A_;
}
}
// Async Copy for operand A scale and bias vector. Scale and bias vectors
// are small. One iteration is enough.
if (group_start_A == 0) {
typename IteratorGammaBeta::AccessType *dst_ptr =
reinterpret_cast<typename IteratorGammaBeta::AccessType *>(
this->smem_iterator_A_gamma_beta_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorGammaBeta::Element>::value *
IteratorGammaBeta::kElementsPerAccess / 8;
cutlass::arch::cp_async<kSrcBytes, kCacheOpGammaBeta>(
dst_ptr, iterator_A_gamma_beta.get(), iterator_A_gamma_beta.valid());
}
iterator_B.set_iteration_index(group_start_B *
IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
}
++iterator_B;
}
++this->smem_iterator_B_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< iterator over B operand in global memory
IteratorVarMean iterator_var_mean,
///< iterator over scale and bias vectors in global memory
IteratorGammaBeta iterator_A_gamma_beta,
///< initial value of accumulator
FragmentC const &src_accum) {
//
// Prologue
//
// Issue several complete stages
WarpLoadedFragmentVarMean warp_loaded_frag_var_mean;
iterator_var_mean.add_tile_offset({0, warp_idx_m_});
iterator_var_mean.load(warp_loaded_frag_var_mean);
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_A_gamma_beta.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
int src_bytes = (iterator_A.valid() ? kSrcBytes : 0);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
// Async Copy for operand A scale and bias vectors. Scale and bias
// vectors are small. One iteration is enough.
{
typename IteratorGammaBeta::AccessType *dst_ptr =
reinterpret_cast<typename IteratorGammaBeta::AccessType *>(
this->smem_iterator_A_gamma_beta_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorGammaBeta::Element>::value *
IteratorGammaBeta::kElementsPerAccess / 8;
cutlass::arch::cp_async<kSrcBytes, kCacheOpGammaBeta>(
dst_ptr, iterator_A_gamma_beta.get(), iterator_A_gamma_beta.valid());
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_A_gamma_beta.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_A_gamma_beta_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpLoadedFragmentGammaBeta warp_loaded_frag_A_gamma_beta[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
cutlass::gemm::warp::LayernormScaleBiasTransform<WarpTransformedFragmentA,
WarpLoadedFragmentVarMean,
WarpLoadedFragmentGammaBeta>
elementwise_transform;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_gamma_beta_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_A_gamma_beta_.load(
warp_loaded_frag_A_gamma_beta[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_A_gamma_beta_;
++this->warp_tile_iterator_B_;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_A_gamma_beta.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
elementwise_transform(warp_transformed_frag_A[0],
warp_loaded_frag_var_mean,
warp_loaded_frag_A_gamma_beta[0]);
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to the k offset
// if this is the last group.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_gamma_beta_.set_kgroup_index(
(warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_gamma_beta_.load(
warp_loaded_frag_A_gamma_beta[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_A_gamma_beta_;
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0) {
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
elementwise_transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_loaded_frag_var_mean,
warp_loaded_frag_A_gamma_beta[warp_mma_k % 2]);
}
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
// Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations - 1) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(iterator_A, iterator_A_gamma_beta, iterator_B,
group_start_iteration_A,
group_start_iteration_B);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(iterator_A, iterator_A_gamma_beta, iterator_B,
group_start_iteration_A,
group_start_iteration_B);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_A_gamma_beta.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_A_gamma_beta_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_A_gamma_beta_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_A_gamma_beta_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_A_gamma_beta.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
elementwise_transform(
warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_var_mean,
warp_loaded_frag_A_gamma_beta[(warp_mma_k + 1) % 2]);
}
}
}
// Commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_layernorm_mainloop_fusion_multistage.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_layernorm_mainloop_fusion_multistage.h",
"repo_id": "include",
"token_count": 13560
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default warp-level GEMM operators selected by data type, size, and layouts of operands.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_mixed_input_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_fast_f32.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization - inputs and output types are float - uses BF16 internally
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp<
WarpShape_,
GemmShape<16, 8, 8>,
float, LayoutA,
float, LayoutB,
float, LayoutC,
arch::OpMultiplyAddFastBF16,
PartitionsK, AccumulatorsInRowMajor> {
// Uses BF16 internally
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
GemmShape<16, 8, 8>,
32,
bfloat16_t, cutlass::layout::RowMajor,
bfloat16_t, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaTensorOp<
WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization - inputs and output types are float - uses F16 internally
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp<
WarpShape_,
GemmShape<16, 8, 8>,
float, LayoutA,
float, LayoutB,
float, LayoutC,
arch::OpMultiplyAddFastF16,
PartitionsK, AccumulatorsInRowMajor> {
// Uses F16 internally
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
GemmShape<16, 8, 8>,
32,
half_t, cutlass::layout::RowMajor,
half_t, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaTensorOp<
WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization - inputs and output types are float - uses TF32 internally
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Shape of target matrix multiply instruction (concept: GemmShape)
typename InstructionShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp<
WarpShape_,
InstructionShape_,
float, LayoutA,
float, LayoutB,
float, LayoutC,
arch::OpMultiplyAdd, PartitionsK, AccumulatorsInRowMajor> {
// Uses TF32 internally
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
InstructionShape_,
32,
tfloat32_t, cutlass::layout::RowMajor,
tfloat32_t, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaTensorOp<
WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization - inputs and output types are float - uses TF32 for Fast Accurate FP32
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Shape of target matrix multiply instruction (concept: GemmShape)
typename InstructionShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp<
WarpShape_,
InstructionShape_,
float, LayoutA,
float, LayoutB,
float, LayoutC,
arch::OpMultiplyAddFastF32, PartitionsK, AccumulatorsInRowMajor> {
// Uses TF32 internally
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
InstructionShape_,
32,
cutlass::tfloat32_t, cutlass::layout::RowMajor,
cutlass::tfloat32_t, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaTensorOpFastF32<
WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization - inputs are mixed types - uses wider datatype internally.
/// (e.g. F16 <= F16 x S8 + F16, F16 <= BF16 x S8 + F32)
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Element type of A matrix
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Element type of B matrix
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp<
WarpShape_,
GemmShape<16, 8, 16>, // InstructionShape
ElementA, // Element type of A matrix in Global Memory
LayoutA, // Layout of A matrix in Global Memory
ElementB, // Element type of B matrix in Global Memory
LayoutB, // Layout of B matrix in Global Memory
ElementC, // Element type of C matrix in Global Memory
LayoutC, // Layout of C matrix in Global Memory
arch::OpMultiplyAddMixedInputUpcast, // Tag to indicate mixed-input datatype, where narrower datatype is upcasted to wider datatype
PartitionsK, AccumulatorsInRowMajor> {
// Check if the ElementA and ElementB are of different data types
static_assert(!platform::is_same<ElementA, ElementB>::value,
"DefaultMmaTensorOp with arch::OpMultiplyAddMixedInputUpcast ElementA and ElementB cannot be of the same data type");
// Data type used for internal computation - use the wider of the two data types for mma.sync operands
using ElementOperand = typename platform::conditional<(sizeof(ElementA) > sizeof(ElementB)),
ElementA, ElementB>::type;
// Operand datatypes in the internal MMA instruction - use the wider of the two data types
using ElementAMma = ElementOperand;
using ElementBMma = ElementOperand;
using MmaElementC = ElementC;
// Define the mma.sync policy using the wider operand data type internally
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
GemmShape<16, 8, 16>,
32,
ElementAMma, cutlass::layout::RowMajor,
ElementBMma, cutlass::layout::ColumnMajor,
MmaElementC, cutlass::layout::RowMajor,
arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaMixedInputTensorOp<
WarpShape_, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
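// A minimal instantiation sketch for this mixed-input specialization. The concrete element types
// are illustrative, and SmemLayoutA / SmemLayoutB stand in for the shared-memory multiplicand
// layouts selected by the threadblock-level defaults (they are assumptions of this sketch, not
// concrete types defined here):
//
//   using WarpMma = typename cutlass::gemm::warp::DefaultMmaTensorOp<
//       cutlass::gemm::GemmShape<64, 64, 32>,        // WarpShape
//       cutlass::gemm::GemmShape<16, 8, 16>,         // InstructionShape
//       cutlass::half_t, SmemLayoutA,                // A (wider type, drives mma.sync)
//       int8_t,          SmemLayoutB,                // B (narrower type, upcast to half_t)
//       float,           cutlass::layout::RowMajor,  // C
//       cutlass::arch::OpMultiplyAddMixedInputUpcast,
//       1, false>::Type;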
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/default_mma_tensor_op_sm80.h/0 | {
"file_path": "include/cutlass/gemm/warp/default_mma_tensor_op_sm80.h",
"repo_id": "include",
"token_count": 4025
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename T, typename S, int N, FloatRoundStyle Round>
struct ConvertAndPack {
using Converter = NumericArrayConverter<T, S, N, Round>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<S, N> const &source) {
Converter converter;
return converter(source);
}
};
template <typename T, int N, FloatRoundStyle Round>
struct ConvertAndPack<T, T, N, Round> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &source) {
return source;
}
};
template <int N, FloatRoundStyle Round>
struct ConvertAndPack<bfloat16_t, float, N, Round> {
using Converter = NumericArrayConverter<bfloat16_t, float, N, Round>;
CUTLASS_HOST_DEVICE
Array<bfloat16_t, N> operator()(Array<float, N> const &source) {
Converter converter;
Array<float, N> tmp;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
int idx = (((i << 1) & 2) | ((i >> 1) & 1) | (i & 0xfffffffc));
tmp[i] = source[idx];
}
return converter(tmp);
}
};
template <int N, FloatRoundStyle Round>
struct ConvertAndPack<half_t, float, N, Round> {
using Converter = NumericArrayConverter<half_t, float, N, Round>;
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<float, N> const &source) {
Converter converter;
Array<float, N> tmp;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
int idx = (((i << 1) & 2) | ((i >> 1) & 1) | (i & 0xfffffffc));
tmp[i] = source[idx];
}
return converter(tmp);
}
};
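// Usage sketch (editor's addition): ConvertAndPack behaves like NumericArrayConverter but, for
// float -> half_t / bfloat16_t, also reorders elements within each group of four to match the
// operand register layout consumed by the tensor core instruction. A host/device call might
// look like the following (the array contents here are hypothetical):
//
//   cutlass::Array<float, 8> src;                 // e.g. a slice of float accumulators
//   detail::ConvertAndPack<cutlass::half_t, float, 8,
//                          cutlass::FloatRoundStyle::round_to_nearest> pack;
//   cutlass::Array<cutlass::half_t, 8> dst = pack(src);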
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting Tensor Cores.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_ = 1,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaTensorOp {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
#if defined(__CUDA_ARCH__) && ((__CUDA_ARCH__ < 800) || (__CUDA_ARCH__ == 890))
static int const kVerticalVisit = true;
#else
static int const kVerticalVisit = false;
#endif
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC,
typename ArchMmaOperator::Shape, typename Policy::OpDelta>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM,
(Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN
>;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C
) const {
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
if (kVerticalVisit) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
int m_serpentine = ((n % 2) ? (MmaIterations::kRow - 1 - m) : m);
if (AccumulatorsInRowMajor) { // matrix B is reordered
mma(
ptr_D[n + m_serpentine * MmaIterations::kColumn],
ptr_A[m_serpentine],
ptr_B[n],
ptr_D[n + m_serpentine * MmaIterations::kColumn]);
} else {
mma(
ptr_D[m_serpentine + n * MmaIterations::kRow],
ptr_A[m_serpentine],
ptr_B[n],
ptr_D[m_serpentine + n * MmaIterations::kRow]);
}
}
}
} else {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n);
if (AccumulatorsInRowMajor) { // matrix B is reordered
mma(
ptr_D[n_serpentine + m * MmaIterations::kColumn],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[n_serpentine + m * MmaIterations::kColumn]);
} else {
mma(ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[m + n_serpentine * MmaIterations::kRow]);
}
}
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
//
// Define conversions from source type to instruction type
//
FloatRoundStyle const kRoundA =
PreferredRoundingMode<typename ArchMmaOperator::ElementA,
ElementA>::kRound;
FloatRoundStyle const kRoundB =
PreferredRoundingMode<typename ArchMmaOperator::ElementB,
ElementB>::kRound;
if (kVerticalVisit) {
detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA,
FragmentA::kElements, kRoundA>
convert_A;
NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB,
FragmentB::kElements / 2, kRoundB>
convert_B;
Array<ElementB, FragmentB::kElements / 2> const *ptr_B =
reinterpret_cast<Array<ElementB, FragmentB::kElements / 2> const *>(&B);
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements / 2> *
ptr_dst_B = reinterpret_cast<Array<typename ArchMmaOperator::ElementB,
FragmentB::kElements / 2> *>(&dst_B);
dst_A = convert_A(A);
ptr_dst_B[0] = convert_B(ptr_B[0]);
ptr_dst_B[1] = convert_B(ptr_B[1]);
} else {
detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA,
FragmentA::kElements / 2, kRoundA>
convert_A;
NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB,
FragmentB::kElements, kRoundB>
convert_B;
Array<ElementA, FragmentA::kElements / 2> const *ptr_A =
reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> const *>(&A);
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *
ptr_dst_A = reinterpret_cast<Array<typename ArchMmaOperator::ElementA,
FragmentA::kElements / 2> *>(&dst_A);
dst_B = convert_B(B);
ptr_dst_A[0] = convert_A(ptr_A[0]);
ptr_dst_A[1] = convert_A(ptr_A[1]);
}
}
};
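// Call pattern sketch (editor's addition; "WarpMma" and the fragment names are hypothetical).
// Inside the warp-level mainloop, operands are first converted to the instruction data types
// via transform(), then the serpentine-ordered sequence of mma instructions is issued by
// operator():
//
//   WarpMma warp_mma;
//   typename WarpMma::FragmentA frag_A;               // loaded by IteratorA
//   typename WarpMma::FragmentB frag_B;               // loaded by IteratorB
//   typename WarpMma::FragmentC accum;                // accumulators
//   typename WarpMma::TransformedFragmentA frag_A_xf;
//   typename WarpMma::TransformedFragmentB frag_B_xf;
//
//   warp_mma.transform(frag_A_xf, frag_B_xf, frag_A, frag_B);
//   warp_mma(accum, frag_A_xf, frag_B_xf, accum);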
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/gemm/warp/mma_tensor_op_fast_f32.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op.h",
"repo_id": "include",
"token_count": 5215
} | 33 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/coord.h"
namespace cutlass {
namespace gemm {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Shape of a matrix multiply-add operation
template <
/// Rows of matrix product
int M = 1,
/// Columns of matrix product
int N = 1,
/// Inner dimension of matrix product
int K = 1
>
struct GemmShape {
static int const kM = M;
static int const kN = N;
static int const kK = K;
static int const kMN = M * N;
static int const kMK = M * K;
static int const kKN = N * K;
static int const kMNK = M * N * K;
static int const kCount = kMNK;
//
// Static member functions
//
/// Returns a Coord object
CUTLASS_HOST_DEVICE
static Coord<3> toCoord() {
return make_Coord(kM, kN, kK);
}
};
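// Example (editor's addition): GemmShape is a purely static description of a tile, so its
// members can be used in constant expressions and converted to a runtime Coord<3>:
//
//   using TileShape = cutlass::gemm::GemmShape<128, 128, 32>;
//   static_assert(TileShape::kMN == 128 * 128, "M-by-N extent of the tile");
//   cutlass::Coord<3> extent = TileShape::toCoord();   // (128, 128, 32)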
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Type alias of the transpose of a GemmShape
template <
/// concept: GemmShape
typename Shape
>
using GemmShapeTranspose = GemmShape<Shape::kN, Shape::kM, Shape::kK>;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// GemmCoord is a structure derived from Coord<3> that specifies a location within the
/// coordinate space of a GEMM problem.
struct GemmCoord : public Coord<3, int> {
/// Integer-valued index
typedef int Index;
/// Base type is a Coord of rank=3
typedef Coord<3, Index> Base;
/// GEMM M dimension - rows of the output C matrix
static int const kM = 0;
/// GEMM N dimension - columns of the output C matrix
static int const kN = 1;
/// GEMM K dimension - inner dimension of the GEMM problem
static int const kK = 2;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
GemmCoord() { }
/// Constructs from a Coord<3>
CUTLASS_HOST_DEVICE
GemmCoord(Coord<3, Index> const& coord): Base(make_Coord(coord[0], coord[1], coord[2])) { }
/// Helper to construct from M, N, and K variables
CUTLASS_HOST_DEVICE
GemmCoord(Index m, Index n, Index k): Base(make_Coord(m, n, k)) { }
/// Returns the GEMM M coordinate
CUTLASS_HOST_DEVICE
Index const& m() const { return this->at(kM); }
/// Returns reference to the GEMM M coordinate
CUTLASS_HOST_DEVICE
Index & m() { return this->at(kM); }
/// Returns the GEMM N coordinate
CUTLASS_HOST_DEVICE
Index const& n() const { return this->at(kN); }
/// Returns reference to the GEMM N coordinate
CUTLASS_HOST_DEVICE
Index & n() { return this->at(kN); }
/// Returns the GEMM K coordinate
CUTLASS_HOST_DEVICE
Index const& k() const { return this->at(kK); }
/// Returns reference to the GEMM K coordinate
CUTLASS_HOST_DEVICE
Index & k() { return this->at(kK); }
/// Obtains a Coord<3> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<3> mnk() const {
return make_Coord(m(), n(), k());
}
/// Obtains a Coord<3> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<3> knm() const {
return make_Coord(k(), n(), m());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> nm() const {
return make_Coord(n(), m());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> mn() const {
return make_Coord(m(), n());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> mk() const {
return make_Coord(m(), k());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> km() const {
return make_Coord(k(), m());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> nk() const {
return make_Coord(n(), k());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> kn() const {
return make_Coord(k(), n());
}
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
GemmCoord operator+(Base const& b) const {
return GemmCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
GemmCoord operator-(Base const& b) const {
return GemmCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
GemmCoord operator*(Base const& b) const {
return GemmCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
GemmCoord operator/(Base const& b) const {
return GemmCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
GemmCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
GemmCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
GemmCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
GemmCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
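// Example (editor's addition): GemmCoord carries a runtime (M, N, K) problem size or tile
// offset and supports named accessors, projections, and element-wise arithmetic:
//
//   cutlass::gemm::GemmCoord problem(1024, 512, 256);        // (M, N, K)
//   int k = problem.k();                                     // 256
//   cutlass::Coord<2> c_extent = problem.mn();               // extent of the output C matrix
//   cutlass::gemm::GemmCoord padded = problem + cutlass::make_Coord(0, 0, 8);  // pad K by 8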
////////////////////////////////////////////////////////////////////////////////////////////////////
/// BatchedGemmCoord is a structure derived from Coord<4> that specifies a location within the
/// coordinate space of a batched GEMM problem.
struct BatchedGemmCoord : public Coord<4, int> {
/// Integer-valued index
typedef int Index;
/// Base type is a Coord of rank=4
typedef Coord<4, Index> Base;
/// GEMM M dimension - rows of the output C matrix
static int const kM = 0;
/// GEMM N dimension - columns of the output C matrix
static int const kN = 1;
/// GEMM K dimension - inner dimension of the GEMM problem
static int const kK = 2;
/// GEMM Batch dimension - batch index of the GEMM problem
static int const kBatch = 3;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
BatchedGemmCoord() { }
/// Constructs from Coord<4>
CUTLASS_HOST_DEVICE
BatchedGemmCoord(Base const& coord): Base(coord) { }
/// Helper to construct from M, N, K, and batch variables
CUTLASS_HOST_DEVICE
BatchedGemmCoord(Index m, Index n, Index k, Index b): Base(make_Coord(m, n, k, b)) { }
/// Returns the GEMM M coordinate
CUTLASS_HOST_DEVICE
Index const& m() const { return this->at(kM); }
/// Returns reference to the GEMM M coordinate
CUTLASS_HOST_DEVICE
Index & m() { return this->at(kM); }
/// Returns the GEMM N coordinate
CUTLASS_HOST_DEVICE
Index const& n() const { return this->at(kN); }
/// Returns reference to the GEMM N coordinate
CUTLASS_HOST_DEVICE
Index & n() { return this->at(kN); }
/// Returns the GEMM K coordinate
CUTLASS_HOST_DEVICE
Index const& k() const { return this->at(kK); }
/// Returns reference to the GEMM K coordinate
CUTLASS_HOST_DEVICE
Index & k() { return this->at(kK); }
/// Returns the GEMM batch coordinate
CUTLASS_HOST_DEVICE
Index const& batch() const { return this->at(kBatch); }
/// Returns reference to the GEMM batch coordinate
CUTLASS_HOST_DEVICE
Index & batch() { return this->at(kBatch); }
/// Obtains a GemmCoord from BatchedGemmCoord
CUTLASS_HOST_DEVICE
GemmCoord mnk() const {
return GemmCoord(m(), n(), k());
}
/// Obtains a Coord<4> from BatchedGemmCoord
CUTLASS_HOST_DEVICE
Coord<4> mnkb() const {
return make_Coord(m(), n(), k(), batch());
}
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
BatchedGemmCoord operator+(Base const& b) const {
return BatchedGemmCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
BatchedGemmCoord operator-(Base const& b) const {
return BatchedGemmCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
BatchedGemmCoord operator*(Base const& b) const {
return BatchedGemmCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
BatchedGemmCoord operator/(Base const& b) const {
return BatchedGemmCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
BatchedGemmCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
BatchedGemmCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
BatchedGemmCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
BatchedGemmCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
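// Example (editor's addition): BatchedGemmCoord adds a batch index that can be dropped when the
// coordinates of a single GEMM are needed:
//
//   cutlass::gemm::BatchedGemmCoord tile(64, 64, 32, 4);     // (M, N, K, batch)
//   cutlass::gemm::GemmCoord mnk = tile.mnk();               // drops the batch index
//   int batch_idx = tile.batch();                            // 4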
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm_coord.h/0 | {
"file_path": "include/cutlass/gemm_coord.h",
"repo_id": "include",
"token_count": 3582
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
\file
\brief Matrix classes with value semantics.
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <iosfwd>
#include <cmath>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/matrix.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Primary template with partial specializations to follow
template <typename Element, int Rows, int Columns> struct Matrix;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 1-by-2 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 1, 2> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 1;
/// Number of columns in matrix
static int const kColumns = 2;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 2;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 1-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 1-by-2 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1
) {
data[0] = _0_0; data[1] = _0_1;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> transpose() const {
Matrix<Element, 2, 1> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> row(int i) const {
return slice_1x2(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) {
return set_slice_1x2(v, i, 0);
}
/// Forms a 1-by-2 matrix by horizontally concatenating an Element with an Element
CUTLASS_HOST_DEVICE
static Matrix hcat(Element lhs, Element rhs) {
return Matrix(
lhs, rhs);
}
/// Concatenates this matrix with an Element to form a 1-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> hcat(Element rhs) const {
return Matrix<Element, 1, 3>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 1-by-2 matrix to form a 1-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> hcat(Matrix<Element, 1, 2> const & rhs) const {
return Matrix<Element, 1, 4>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 1-by-2 matrix to form a 2-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> vcat(Matrix<Element, 1, 2> const & rhs) const {
return Matrix<Element, 2, 2>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-2 matrix to form a 3-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> vcat(Matrix<Element, 2, 2> const & rhs) const {
return Matrix<Element, 3, 2>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-2 matrix to form a 4-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> vcat(Matrix<Element, 3, 2> const & rhs) const {
return Matrix<Element, 4, 2>::vcat(*this, rhs);
}
/// Elementwise add operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
return result;
}
/// Elementwise add operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
return *this;
}
/// Elementwise subtract operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
return result;
}
/// Elementwise subtract operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
return *this;
}
/// Elementwise multiply operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
return result;
}
/// Scalar multiply operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
return result;
}
/// Scalar multiply operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
return *this;
}
/// Elementwise divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
return result;
}
/// Scalar divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
return result;
}
/// Scalar divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
return *this;
}
/// Elementwise divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
return m;
}
/// Matrix product of size 1-by-1-by-2
CUTLASS_HOST_DEVICE
Element product(Matrix<Element, 2, 1> const &rhs, Element accum = Element()) const {
// k=0
accum += data[0] * rhs.data[0];
// k=1
accum += data[1] * rhs.data[1];
return accum;
}
/// Matrix product of size 1-by-1-by-2
CUTLASS_HOST_DEVICE
Element operator*(Matrix<Element, 2, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> product(
Matrix<Element, 2, 2> const &rhs,
Matrix<Element, 1, 2> accum = Matrix<Element, 1, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
return accum;
}
/// Matrix product of size 1-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> operator*(Matrix<Element, 2, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 1-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> product(
Matrix<Element, 2, 3> const &rhs,
Matrix<Element, 1, 3> accum = Matrix<Element, 1, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
return accum;
}
/// Matrix product of size 1-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> operator*(Matrix<Element, 2, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> product(
Matrix<Element, 2, 4> const &rhs,
Matrix<Element, 1, 4> accum = Matrix<Element, 1, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
return accum;
}
/// Matrix product of size 1-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> operator*(Matrix<Element, 2, 4> const &rhs) const {
return product(rhs);
}
/// Dot product of vectors with extent 2
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 2, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
return accum;
}
/// Dot product of vectors with extent 2
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 2> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
};
/// Template alias for 1-by-2 matrix
template <typename Element>
using Matrix1x2 = Matrix<Element, 1, 2>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix1x2<Element> make_Matrix1x2(
Element _0_0, Element _0_1
) {
return Matrix1x2<Element>(
_0_0, _0_1
);
}
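// Example (editor's addition): the 1-by-2 specialization behaves as a row vector with value
// semantics; every operation below is defined above in this header:
//
//   auto u = cutlass::make_Matrix1x2(1.0f, 2.0f);
//   auto v = cutlass::make_Matrix1x2(3.0f, 4.0f);
//   float d = u.dot(v);              // 1*3 + 2*4 = 11
//   auto avg = (u + v) * 0.5f;       // elementwise average
//   auto col = u.transpose();        // 2-by-1 column vector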
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 1-by-3 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 1, 3> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 1;
/// Number of columns in matrix
static int const kColumns = 3;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 3;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 1-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 1-by-3 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> transpose() const {
Matrix<Element, 3, 1> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
mt.data[2] = data[2];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> row(int i) const {
return slice_1x3(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) {
return set_slice_1x3(v, i, 0);
}
/// Forms a 1-by-3 matrix by horizontally concatenating an Element with a 1-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Element lhs, Matrix<Element, 1, 2> const & rhs) {
return Matrix(
lhs, rhs.at(0, 0), rhs.at(0, 1));
}
/// Forms a 1-by-3 matrix by horizontally concatenating a 1-by-2 matrix with an Element
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 1, 2> const & lhs, Element rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs);
}
/// Concatenates this matrix with an Element to form a 1-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> hcat(Element rhs) const {
return Matrix<Element, 1, 4>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 1-by-3 matrix to form a 2-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> vcat(Matrix<Element, 1, 3> const & rhs) const {
return Matrix<Element, 2, 3>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-3 matrix to form a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> vcat(Matrix<Element, 2, 3> const & rhs) const {
return Matrix<Element, 3, 3>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-3 matrix to form a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> vcat(Matrix<Element, 3, 3> const & rhs) const {
return Matrix<Element, 4, 3>::vcat(*this, rhs);
}
/// Elementwise add operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
return result;
}
/// Elementwise add operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
return *this;
}
/// Elementwise subtract operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
return result;
}
/// Elementwise subtract operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
return *this;
}
/// Elementwise multiply operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
return result;
}
/// Scalar multiply operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
return result;
}
/// Scalar multiply operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
return *this;
}
/// Elementwise divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
return result;
}
/// Scalar divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
return result;
}
/// Scalar divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
return *this;
}
/// Elementwise divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
return m;
}
/// Matrix product of size 1-by-1-by-3
CUTLASS_HOST_DEVICE
Element product(Matrix<Element, 3, 1> const &rhs, Element accum = Element()) const {
// k=0
accum += data[0] * rhs.data[0];
// k=1
accum += data[1] * rhs.data[1];
// k=2
accum += data[2] * rhs.data[2];
return accum;
}
/// Matrix product of size 1-by-1-by-3
CUTLASS_HOST_DEVICE
Element operator*(Matrix<Element, 3, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> product(
Matrix<Element, 3, 2> const &rhs,
Matrix<Element, 1, 2> accum = Matrix<Element, 1, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
return accum;
}
/// Matrix product of size 1-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> operator*(Matrix<Element, 3, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> product(
Matrix<Element, 3, 3> const &rhs,
Matrix<Element, 1, 3> accum = Matrix<Element, 1, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
return accum;
}
/// Matrix product of size 1-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> operator*(Matrix<Element, 3, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 1-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> product(
Matrix<Element, 3, 4> const &rhs,
Matrix<Element, 1, 4> accum = Matrix<Element, 1, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
return accum;
}
/// Matrix product of size 1-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> operator*(Matrix<Element, 3, 4> const &rhs) const {
return product(rhs);
}
/// Dot product of vectors with extent 3
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 3, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
return accum;
}
/// Dot product of vectors with extent 3
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 3> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
/// Cross product
CUTLASS_HOST_DEVICE
Matrix cross(Matrix const &rhs) const {
return Matrix(
data[1] * rhs.data[2] - data[2] * rhs.data[1],
data[2] * rhs.data[0] - data[0] * rhs.data[2],
data[0] * rhs.data[1] - data[1] * rhs.data[0]
);
}
};
/// Template alias for 1-by-3 matrix
template <typename Element>
using Matrix1x3 = Matrix<Element, 1, 3>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix1x3<Element> make_Matrix1x3(
Element _0_0, Element _0_1, Element _0_2
) {
return Matrix1x3<Element>(
_0_0, _0_1, _0_2
);
}
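// Example (editor's addition): the 1-by-3 specialization adds a cross product alongside the
// usual row-vector operations:
//
//   auto x = cutlass::make_Matrix1x3(1.0f, 0.0f, 0.0f);
//   auto y = cutlass::make_Matrix1x3(0.0f, 1.0f, 0.0f);
//   auto z = x.cross(y);             // (0, 0, 1)
//   float len = z.magnitude();       // 1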
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 1-by-4 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 1, 4> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 1;
/// Number of columns in matrix
static int const kColumns = 4;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 4;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 1-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 1-by-4 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2, Element _0_3
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> transpose() const {
Matrix<Element, 4, 1> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
mt.data[2] = data[2];
mt.data[3] = data[3];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const {
Matrix<Element, 1, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> row(int i) const {
return slice_1x4(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) {
return set_slice_1x4(v, i, 0);
}
/// Forms a 1-by-4 matrix by horizontally concatenating an Element with a 1-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Element lhs, Matrix<Element, 1, 3> const & rhs) {
return Matrix(
lhs, rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2));
}
/// Forms a 1-by-4 matrix by horizontally concatenating a 1-by-2 matrix with a 1-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 1, 2> const & lhs, Matrix<Element, 1, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1));
}
/// Forms a 1-by-4 matrix by horizontally concatenating a 1-by-3 matrix with an Element
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 1, 3> const & lhs, Element rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs);
}
/// Concatenates this matrix with a 1-by-4 matrix to form a 2-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> vcat(Matrix<Element, 1, 4> const & rhs) const {
return Matrix<Element, 2, 4>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-4 matrix to form a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> vcat(Matrix<Element, 2, 4> const & rhs) const {
return Matrix<Element, 3, 4>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-4 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> vcat(Matrix<Element, 3, 4> const & rhs) const {
return Matrix<Element, 4, 4>::vcat(*this, rhs);
}
/// Elementwise add operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
return result;
}
/// Elementwise add operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
return *this;
}
/// Elementwise subtract operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
return result;
}
/// Elementwise subtract operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
return *this;
}
/// Elementwise multiply operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
return result;
}
/// Scalar multiply operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
return result;
}
/// Scalar multiply operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
return *this;
}
/// Elementwise divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
return result;
}
/// Scalar divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
return result;
}
/// Scalar divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
return *this;
}
/// Elementwise divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
    m.data[0] = -data[0];
    m.data[1] = -data[1];
    m.data[2] = -data[2];
    m.data[3] = -data[3];
return m;
}
/// Matrix product of size 1-by-1-by-4
CUTLASS_HOST_DEVICE
Element product(Matrix<Element, 4, 1> const &rhs, Element accum = Element()) const {
// k=0
accum += data[0] * rhs.data[0];
// k=1
accum += data[1] * rhs.data[1];
// k=2
accum += data[2] * rhs.data[2];
// k=3
accum += data[3] * rhs.data[3];
return accum;
}
/// Matrix product of size 1-by-1-by-4
CUTLASS_HOST_DEVICE
Element operator*(Matrix<Element, 4, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> product(
Matrix<Element, 4, 2> const &rhs,
Matrix<Element, 1, 2> accum = Matrix<Element, 1, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
// k=3
accum.data[0] += data[3] * rhs.data[6];
accum.data[1] += data[3] * rhs.data[7];
return accum;
}
/// Matrix product of size 1-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> operator*(Matrix<Element, 4, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> product(
Matrix<Element, 4, 3> const &rhs,
Matrix<Element, 1, 3> accum = Matrix<Element, 1, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
// k=3
accum.data[0] += data[3] * rhs.data[9];
accum.data[1] += data[3] * rhs.data[10];
accum.data[2] += data[3] * rhs.data[11];
return accum;
}
/// Matrix product of size 1-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> operator*(Matrix<Element, 4, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> product(
Matrix<Element, 4, 4> const &rhs,
Matrix<Element, 1, 4> accum = Matrix<Element, 1, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
// k=3
accum.data[0] += data[3] * rhs.data[12];
accum.data[1] += data[3] * rhs.data[13];
accum.data[2] += data[3] * rhs.data[14];
accum.data[3] += data[3] * rhs.data[15];
return accum;
}
/// Matrix product of size 1-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> operator*(Matrix<Element, 4, 4> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) {
*this = product(rhs);
return *this;
}
/// Dot product of vectors with extent 4
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 4, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
accum += data[3] * rhs.data[3];
return accum;
}
/// Dot product of vectors with extent 4
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 4> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
accum += data[3] * rhs.data[3];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
};
/// Template alias for 1-by-4 matrix
template <typename Element>
using Matrix1x4 = Matrix<Element, 1, 4>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix1x4<Element> make_Matrix1x4(
Element _0_0, Element _0_1, Element _0_2, Element _0_3
) {
return Matrix1x4<Element>(
_0_0, _0_1, _0_2, _0_3
);
}
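
/// Usage sketch for the 1-by-4 specialization above. This is illustrative only and not part of
/// the CUTLASS API; the function name and the concrete values are arbitrary choices.
template <typename Element>
CUTLASS_HOST_DEVICE
Element example_Matrix1x4_usage() {
  Matrix1x4<Element> v = make_Matrix1x4(Element(1), Element(2), Element(3), Element(4));
  Matrix1x4<Element> w = v * Element(2);   // scalar multiply: (2, 4, 6, 8)
  Element d = v.dot(w);                    // 1*2 + 2*4 + 3*6 + 4*8 = 60
  Element n = v.norm();                    // sum of squares: 1 + 4 + 9 + 16 = 30
  return d + n + v.sum();                  // 60 + 30 + 10 = 100
}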
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 2-by-1 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 2, 1> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 2;
/// Number of columns in matrix
static int const kColumns = 1;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 2;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 2-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
  /// Constructs a 2-by-1 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0,
Element _1_0
) {
data[0] = _0_0;
data[1] = _1_0;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> transpose() const {
Matrix<Element, 1, 2> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
    return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
    return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> column(int j) const {
return slice_2x1(0, j);
}
CUTLASS_HOST_DEVICE
  Matrix &set_column(Matrix<Element, 2, 1> const &v, int j = 0) {
return set_slice_2x1(v, 0, j);
}
  /// Concatenates this matrix with a 2-by-1 matrix to form a 2-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> hcat(Matrix<Element, 2, 1> const & rhs) const {
return Matrix<Element, 2, 2>::hcat(*this, rhs);
}
  /// Concatenates this matrix with a 2-by-2 matrix to form a 2-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> hcat(Matrix<Element, 2, 2> const & rhs) const {
return Matrix<Element, 2, 3>::hcat(*this, rhs);
}
  /// Concatenates this matrix with a 2-by-3 matrix to form a 2-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> hcat(Matrix<Element, 2, 3> const & rhs) const {
return Matrix<Element, 2, 4>::hcat(*this, rhs);
}
/// Forms a 2-by-1 matrix by vertically concatenating an Element with an Element
CUTLASS_HOST_DEVICE
static Matrix vcat(Element upper, Element lower) {
return Matrix(
upper
, lower);
}
  /// Concatenates this matrix with an Element to form a 3-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> vcat(Element rhs) const {
return Matrix<Element, 3, 1>::vcat(*this, rhs);
}
  /// Concatenates this matrix with a 2-by-1 matrix to form a 4-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> vcat(Matrix<Element, 2, 1> const & rhs) const {
return Matrix<Element, 4, 1>::vcat(*this, rhs);
}
/// Elementwise add operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
return result;
}
/// Elementwise add operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
return *this;
}
/// Elementwise subtract operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
return result;
}
/// Elementwise subtract operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
return *this;
}
/// Elementwise multiply operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
return result;
}
/// Scalar multiply operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
return result;
}
/// Scalar multiply operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
return *this;
}
/// Elementwise divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
return result;
}
/// Scalar divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
return result;
}
/// Scalar divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
return *this;
}
/// Elementwise divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
    m.data[0] = -data[0];
    m.data[1] = -data[1];
return m;
}
/// Matrix product of size 2-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> product(
Matrix<Element, 1, 1> const &rhs,
Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[1] * rhs.data[0];
return accum;
}
/// Matrix product of size 2-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> operator*(Matrix<Element, 1, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 1, 1> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 2-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> product(
Matrix<Element, 1, 2> const &rhs,
Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[1] * rhs.data[0];
accum.data[3] += data[1] * rhs.data[1];
return accum;
}
/// Matrix product of size 2-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> operator*(Matrix<Element, 1, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> product(
Matrix<Element, 1, 3> const &rhs,
Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[1] * rhs.data[0];
accum.data[4] += data[1] * rhs.data[1];
accum.data[5] += data[1] * rhs.data[2];
return accum;
}
/// Matrix product of size 2-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> operator*(Matrix<Element, 1, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> product(
Matrix<Element, 1, 4> const &rhs,
Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[1] * rhs.data[0];
accum.data[5] += data[1] * rhs.data[1];
accum.data[6] += data[1] * rhs.data[2];
accum.data[7] += data[1] * rhs.data[3];
return accum;
}
/// Matrix product of size 2-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> operator*(Matrix<Element, 1, 4> const &rhs) const {
return product(rhs);
}
/// Dot product of vectors with extent 2
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 2, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
return accum;
}
/// Dot product of vectors with extent 2
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 2> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
};
/// Template alias for 2-by-1 matrix
template <typename Element>
using Matrix2x1 = Matrix<Element, 2, 1>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix2x1<Element> make_Matrix2x1(
Element _0_0,
Element _1_0
) {
return Matrix2x1<Element>(
_0_0,
_1_0
);
}
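
/// Usage sketch for the 2-by-1 specialization above (a column vector). Illustrative only and not
/// part of the CUTLASS API; the function name and the concrete values are arbitrary choices.
template <typename Element>
CUTLASS_HOST_DEVICE
Element example_Matrix2x1_usage() {
  Matrix2x1<Element> x = make_Matrix2x1(Element(3), Element(4));
  Matrix2x1<Element> y = x * Element(2);   // scalar multiply: (6, 8)
  Element len = x.magnitude();             // sqrt(9 + 16) = 5
  return len + x.dot(y);                   // 5 + (3*6 + 4*8) = 55
}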
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 2-by-2 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 2, 2> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 2;
/// Number of columns in matrix
static int const kColumns = 2;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 4;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 2-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
  /// Constructs a 2-by-2 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1
) {
data[0] = _0_0; data[1] = _0_1;
data[2] = _1_0; data[3] = _1_1;
}
  /// Constructs a 2-by-2 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 2> const &row_0,
Matrix<Element, 1, 2> const &row_1
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_1.data[0];
data[3] = row_1.data[1];
}
/// Static method to construct a 2-by-2 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 2, 1> const &column_0,
Matrix<Element, 2, 1> const &column_1
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_0.data[1];
result.data[3] = column_1.data[1];
return result;
}
/// Constructs an identity matrix
CUTLASS_HOST_DEVICE
static Matrix identity() {
Matrix m;
m.data[0] = Element(1);
m.data[3] = Element(1);
return m;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[3] = diag.data[1];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[3] = diag.data[1];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> diagonal() const {
Matrix<Element, 2, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[3];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> transpose() const {
Matrix<Element, 2, 2> mt;
mt.data[0] = data[0];
mt.data[2] = data[1];
mt.data[1] = data[2];
mt.data[3] = data[3];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> row(int i) const {
return slice_1x2(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) {
return set_slice_1x2(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> column(int j) const {
return slice_2x1(0, j);
}
CUTLASS_HOST_DEVICE
  Matrix &set_column(Matrix<Element, 2, 1> const &v, int j = 0) {
return set_slice_2x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
return *this;
}
/// Forms a 2-by-2 matrix by horizontally concatenating a 2-by-1 matrix with a 2-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 1> const & lhs, Matrix<Element, 2, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0)
, lhs.at(1, 0), rhs.at(1, 0));
}
  /// Concatenates this matrix with a 2-by-1 matrix to form a 2-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> hcat(Matrix<Element, 2, 1> const & rhs) const {
return Matrix<Element, 2, 3>::hcat(*this, rhs);
}
  /// Concatenates this matrix with a 2-by-2 matrix to form a 2-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> hcat(Matrix<Element, 2, 2> const & rhs) const {
return Matrix<Element, 2, 4>::hcat(*this, rhs);
}
/// Forms a 2-by-2 matrix by vertically concatenating a 1-by-2 matrix with a 1-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 2> const & upper, Matrix<Element, 1, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, lower.at(0, 0), lower.at(0, 1));
}
  /// Concatenates this matrix with a 1-by-2 matrix to form a 3-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> vcat(Matrix<Element, 1, 2> const & rhs) const {
return Matrix<Element, 3, 2>::vcat(*this, rhs);
}
  /// Concatenates this matrix with a 2-by-2 matrix to form a 4-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> vcat(Matrix<Element, 2, 2> const & rhs) const {
return Matrix<Element, 4, 2>::vcat(*this, rhs);
}
/// Forms a 2-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Element B,
Element C, Element D) {
return Matrix(
A, B
, C, D
);
}
/// Elementwise add operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
return result;
}
/// Elementwise add operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
return *this;
}
/// Elementwise subtract operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
return result;
}
/// Elementwise subtract operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
return *this;
}
/// Elementwise multiply operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
return result;
}
/// Scalar multiply operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
return result;
}
/// Scalar multiply operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
return *this;
}
/// Elementwise divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
return result;
}
/// Scalar divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
return result;
}
/// Scalar divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
return *this;
}
/// Elementwise divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
    m.data[0] = -data[0];
    m.data[1] = -data[1];
    m.data[2] = -data[2];
    m.data[3] = -data[3];
return m;
}
/// Matrix product of size 2-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> product(
Matrix<Element, 2, 1> const &rhs,
Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[2] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[3] * rhs.data[1];
return accum;
}
/// Matrix product of size 2-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> operator*(Matrix<Element, 2, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> product(
Matrix<Element, 2, 2> const &rhs,
Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[2] * rhs.data[0];
accum.data[3] += data[2] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[3] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[3];
return accum;
}
/// Matrix product of size 2-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> operator*(Matrix<Element, 2, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 2-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> product(
Matrix<Element, 2, 3> const &rhs,
Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[2] * rhs.data[0];
accum.data[4] += data[2] * rhs.data[1];
accum.data[5] += data[2] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[3] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
return accum;
}
/// Matrix product of size 2-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> operator*(Matrix<Element, 2, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> product(
Matrix<Element, 2, 4> const &rhs,
Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[2] * rhs.data[0];
accum.data[5] += data[2] * rhs.data[1];
accum.data[6] += data[2] * rhs.data[2];
accum.data[7] += data[2] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
accum.data[6] += data[3] * rhs.data[6];
accum.data[7] += data[3] * rhs.data[7];
return accum;
}
/// Matrix product of size 2-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> operator*(Matrix<Element, 2, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[3];
return accum;
}
/// Returns 2-by-2 rotation matrix
CUTLASS_HOST_DEVICE
static Matrix rotation(Element theta) {
Element c = fast_cos(theta);
Element s = fast_sin(theta);
return Matrix(
c, -s,
s, c
);
}
/// Computes the determinant of a 2-by-2 matrix
CUTLASS_HOST_DEVICE
Element determinant(Element accum = Element()) const {
accum += data[0] * data[3] - data[1] * data[2];
return accum;
}
/// Computes the inverse of a 2-by-2 matrix given
/// the matrix's determinant
CUTLASS_HOST_DEVICE
Matrix inverse(Element det) const {
return Matrix(
data[3], -data[1],
-data[2], data[0]
) * (Element(1) / det);
}
/// Computes the inverse of a 2-by-2 matrix.
CUTLASS_HOST_DEVICE
Matrix inverse() const {
return inverse(determinant());
}
};
/// Template alias for 2-by-2 matrix
template <typename Element>
using Matrix2x2 = Matrix<Element, 2, 2>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix2x2<Element> make_Matrix2x2(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1
) {
return Matrix2x2<Element>(
_0_0, _0_1,
_1_0, _1_1
);
}
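
/// Usage sketch for the 2-by-2 specialization above, exercising the determinant and inverse
/// helpers. Illustrative only and not part of the CUTLASS API; names and values are arbitrary.
template <typename Element>
CUTLASS_HOST_DEVICE
Element example_Matrix2x2_usage() {
  Matrix2x2<Element> A = make_Matrix2x2(
    Element(4), Element(7),
    Element(2), Element(6)
  );
  Element det = A.determinant();           // 4*6 - 7*2 = 10
  Matrix2x2<Element> A_inv = A.inverse();  // (1/10) * [[6, -7], [-2, 4]]
  Matrix2x1<Element> b = make_Matrix2x1(Element(1), Element(0));
  Matrix2x1<Element> x = A_inv * b;        // first column of A^-1: (0.6, -0.2)
  return det + x.at(0, 0);                 // 10 + 0.6 (exact only for floating-point Element)
}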
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 2-by-3 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 2, 3> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 2;
/// Number of columns in matrix
static int const kColumns = 3;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 6;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 2-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
  /// Constructs a 2-by-3 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2;
data[3] = _1_0; data[4] = _1_1; data[5] = _1_2;
}
  /// Constructs a 2-by-3 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 3> const &row_0,
Matrix<Element, 1, 3> const &row_1
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_1.data[0];
data[4] = row_1.data[1];
data[5] = row_1.data[2];
}
/// Static method to construct a 2-by-3 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 3, 1> const &column_0,
Matrix<Element, 3, 1> const &column_1,
Matrix<Element, 3, 1> const &column_2
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_0.data[1];
result.data[4] = column_1.data[1];
result.data[5] = column_2.data[1];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
    m.data[4] = diag.data[1];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
    m.data[4] = diag.data[1];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> diagonal() const {
Matrix<Element, 2, 1> diag;
diag.data[0] = data[0];
    diag.data[1] = data[4];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> transpose() const {
Matrix<Element, 3, 2> mt;
mt.data[0] = data[0];
mt.data[2] = data[1];
mt.data[4] = data[2];
mt.data[1] = data[3];
mt.data[3] = data[4];
mt.data[5] = data[5];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
    return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
    return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> row(int i) const {
return slice_1x3(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) {
return set_slice_1x3(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> column(int j) const {
return slice_2x1(0, j);
}
CUTLASS_HOST_DEVICE
  Matrix &set_column(Matrix<Element, 2, 1> const &v, int j = 0) {
return set_slice_2x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
return *this;
}
/// Forms a 2-by-3 matrix by horizontally concatenating a 2-by-1 matrix with a 2-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 1> const & lhs, Matrix<Element, 2, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1));
}
/// Forms a 2-by-3 matrix by horizontally concatenating a 2-by-2 matrix with a 2-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 2> const & lhs, Matrix<Element, 2, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0));
}
  /// Concatenates this matrix with a 2-by-1 matrix to form a 2-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> hcat(Matrix<Element, 2, 1> const & rhs) const {
return Matrix<Element, 2, 4>::hcat(*this, rhs);
}
/// Forms a 2-by-3 matrix by vertically concatenating a 1-by-3 matrix with a 1-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 3> const & upper, Matrix<Element, 1, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2));
}
  /// Concatenates this matrix with a 1-by-3 matrix to form a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> vcat(Matrix<Element, 1, 3> const & rhs) const {
return Matrix<Element, 3, 3>::vcat(*this, rhs);
}
  /// Concatenates this matrix with a 2-by-3 matrix to form a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> vcat(Matrix<Element, 2, 3> const & rhs) const {
return Matrix<Element, 4, 3>::vcat(*this, rhs);
}
/// Forms a 2-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 2> const & B,
Element C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1)
, C, D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 2-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Element B,
Matrix<Element, 1, 2> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B
, C.at(0, 0), C.at(0, 1), D
);
}
/// Elementwise add operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
return result;
}
/// Elementwise add operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
return *this;
}
/// Elementwise subtract operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
return result;
}
/// Elementwise subtract operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
return *this;
}
/// Elementwise multiply operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
return result;
}
/// Scalar multiply operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
return result;
}
/// Scalar multiply operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
return *this;
}
/// Elementwise divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
return result;
}
/// Scalar divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
return result;
}
/// Scalar divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
return *this;
}
/// Elementwise divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
    m.data[0] = -data[0];
    m.data[1] = -data[1];
    m.data[2] = -data[2];
    m.data[3] = -data[3];
    m.data[4] = -data[4];
    m.data[5] = -data[5];
return m;
}
/// Matrix product of size 2-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> product(
Matrix<Element, 3, 1> const &rhs,
Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[3] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[4] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[5] * rhs.data[2];
return accum;
}
/// Matrix product of size 2-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> operator*(Matrix<Element, 3, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> product(
Matrix<Element, 3, 2> const &rhs,
Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[3] * rhs.data[0];
accum.data[3] += data[3] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[4] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[5] * rhs.data[4];
accum.data[3] += data[5] * rhs.data[5];
return accum;
}
/// Matrix product of size 2-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> operator*(Matrix<Element, 3, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> product(
Matrix<Element, 3, 3> const &rhs,
Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[0];
accum.data[4] += data[3] * rhs.data[1];
accum.data[5] += data[3] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[4] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[5] * rhs.data[6];
accum.data[4] += data[5] * rhs.data[7];
accum.data[5] += data[5] * rhs.data[8];
return accum;
}
/// Matrix product of size 2-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> operator*(Matrix<Element, 3, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 2-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> product(
Matrix<Element, 3, 4> const &rhs,
Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[0];
accum.data[5] += data[3] * rhs.data[1];
accum.data[6] += data[3] * rhs.data[2];
accum.data[7] += data[3] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
accum.data[6] += data[4] * rhs.data[6];
accum.data[7] += data[4] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[5] * rhs.data[8];
accum.data[5] += data[5] * rhs.data[9];
accum.data[6] += data[5] * rhs.data[10];
accum.data[7] += data[5] * rhs.data[11];
return accum;
}
/// Matrix product of size 2-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> operator*(Matrix<Element, 3, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[4];
return accum;
}
};
/// Template alias for 2-by-3 matrix
template <typename Element>
using Matrix2x3 = Matrix<Element, 2, 3>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix2x3<Element> make_Matrix2x3(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2
) {
return Matrix2x3<Element>(
_0_0, _0_1, _0_2,
_1_0, _1_1, _1_2
);
}
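
/// Usage sketch for the 2-by-3 specialization above, exercising row/column slices and the
/// diagonal helpers. Illustrative only and not part of the CUTLASS API; names and values are arbitrary.
template <typename Element>
CUTLASS_HOST_DEVICE
Element example_Matrix2x3_usage() {
  Matrix2x3<Element> M = make_Matrix2x3(
    Element(1), Element(2), Element(3),
    Element(4), Element(5), Element(6)
  );
  Matrix<Element, 1, 3> r0 = M.row(0);     // (1, 2, 3)
  Matrix<Element, 2, 1> c2 = M.column(2);  // (3, 6)
  Matrix2x3<Element> D = Matrix2x3<Element>::from_diagonal(make_Matrix2x1(Element(7), Element(8)));
  return M.trace() + r0.sum() + c2.sum() + D.diagonal().sum();   // 6 + 6 + 9 + 15 = 36
}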
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 2-by-4 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 2, 4> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 2;
/// Number of columns in matrix
static int const kColumns = 4;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 8;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 2-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
  /// Constructs a 2-by-4 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3;
data[4] = _1_0; data[5] = _1_1; data[6] = _1_2; data[7] = _1_3;
}
  /// Constructs a 2-by-4 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 4> const &row_0,
Matrix<Element, 1, 4> const &row_1
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_0.data[3];
data[4] = row_1.data[0];
data[5] = row_1.data[1];
data[6] = row_1.data[2];
data[7] = row_1.data[3];
}
/// Static method to construct a 2-by-4 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 4, 1> const &column_0,
Matrix<Element, 4, 1> const &column_1,
Matrix<Element, 4, 1> const &column_2,
Matrix<Element, 4, 1> const &column_3
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_3.data[0];
result.data[4] = column_0.data[1];
result.data[5] = column_1.data[1];
result.data[6] = column_2.data[1];
result.data[7] = column_3.data[1];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
    m.data[5] = diag.data[1];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
    m.data[5] = diag.data[1];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> diagonal() const {
Matrix<Element, 2, 1> diag;
diag.data[0] = data[0];
    diag.data[1] = data[5];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> transpose() const {
Matrix<Element, 4, 2> mt;
mt.data[0] = data[0];
mt.data[2] = data[1];
mt.data[4] = data[2];
mt.data[6] = data[3];
mt.data[1] = data[4];
mt.data[3] = data[5];
mt.data[5] = data[6];
mt.data[7] = data[7];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
    return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
    return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const {
Matrix<Element, 1, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> row(int i) const {
return slice_1x4(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) {
return set_slice_1x4(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> column(int j) const {
return slice_2x1(0, j);
}
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 2, 1> const &v, int j = 0) {
return set_slice_2x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> slice_2x4(int i = 0, int j = 0) const {
Matrix<Element, 2, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x4(Matrix<Element, 2, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
return *this;
}
/// Forms a 2-by-4 matrix by horizontally concatenating a 2-by-1 matrix with a 2-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 1> const & lhs, Matrix<Element, 2, 3> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1), rhs.at(1, 2));
}
/// Forms a 2-by-4 matrix by horizontally concatenating a 2-by-2 matrix with a 2-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 2> const & lhs, Matrix<Element, 2, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0), rhs.at(1, 1));
}
/// Forms a 2-by-4 matrix by horizontally concatenating a 2-by-3 matrix with a 2-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 3> const & lhs, Matrix<Element, 2, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), lhs.at(1, 2), rhs.at(1, 0));
}
/// Forms a 2-by-4 matrix by vertically concatenating a 1-by-4 matrix with a 1-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 4> const & upper, Matrix<Element, 1, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3));
}
/// Concatenates this matrix with a 1-by-4 matrix to form a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> vcat(Matrix<Element, 1, 4> const & rhs) const {
return Matrix<Element, 3, 4>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-4 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> vcat(Matrix<Element, 2, 4> const & rhs) const {
return Matrix<Element, 4, 4>::vcat(*this, rhs);
}
/// Forms a 2-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 3> const & B,
Element C, Matrix<Element, 1, 3> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1), B.at(0, 2)
, C, D.at(0, 0), D.at(0, 1), D.at(0, 2)
);
}
/// Forms a 2-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Matrix<Element, 1, 2> const & B,
Matrix<Element, 1, 2> const & C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 2-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 3> const & A, Element B,
Matrix<Element, 1, 3> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D
);
}
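// Illustrative sketch (not part of the generated implementation): hcat() can reassemble
// a 2-by-4 matrix from two 2-by-2 blocks. Assumes the 2-by-2 specialization defined
// earlier in this header provides the analogous element-wise constructor.
//
//   Matrix<float, 2, 2> left(1, 2, 5, 6);
//   Matrix<float, 2, 2> right(3, 4, 7, 8);
//   Matrix<float, 2, 4> m = Matrix<float, 2, 4>::hcat(left, right);
//   // m now holds rows (1, 2, 3, 4) and (5, 6, 7, 8)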
/// Elementwise add operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
return result;
}
/// Elementwise add operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
return *this;
}
/// Elementwise subtract operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
return result;
}
/// Elementwise subtract operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
return *this;
}
/// Elementwise multiply operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
return result;
}
/// Scalar multiply operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
return result;
}
/// Scalar multiply operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
return *this;
}
/// Elementwise divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
return result;
}
/// Scalar divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
return result;
}
/// Scalar divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
return *this;
}
/// Elementwise divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
m.data[4] = -data[4];
m.data[5] = -data[5];
m.data[6] = -data[6];
m.data[7] = -data[7];
return m;
}
/// Matrix product of size 2-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> product(
Matrix<Element, 4, 1> const &rhs,
Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[4] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[5] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[6] * rhs.data[2];
// k=3
accum.data[0] += data[3] * rhs.data[3];
accum.data[1] += data[7] * rhs.data[3];
return accum;
}
/// Matrix product of size 2-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> operator*(Matrix<Element, 4, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> product(
Matrix<Element, 4, 2> const &rhs,
Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[4] * rhs.data[0];
accum.data[3] += data[4] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[5] * rhs.data[2];
accum.data[3] += data[5] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[6] * rhs.data[4];
accum.data[3] += data[6] * rhs.data[5];
// k=3
accum.data[0] += data[3] * rhs.data[6];
accum.data[1] += data[3] * rhs.data[7];
accum.data[2] += data[7] * rhs.data[6];
accum.data[3] += data[7] * rhs.data[7];
return accum;
}
/// Matrix product of size 2-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> operator*(Matrix<Element, 4, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> product(
Matrix<Element, 4, 3> const &rhs,
Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[0];
accum.data[4] += data[4] * rhs.data[1];
accum.data[5] += data[4] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[5] * rhs.data[3];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[6] * rhs.data[6];
accum.data[4] += data[6] * rhs.data[7];
accum.data[5] += data[6] * rhs.data[8];
// k=3
accum.data[0] += data[3] * rhs.data[9];
accum.data[1] += data[3] * rhs.data[10];
accum.data[2] += data[3] * rhs.data[11];
accum.data[3] += data[7] * rhs.data[9];
accum.data[4] += data[7] * rhs.data[10];
accum.data[5] += data[7] * rhs.data[11];
return accum;
}
/// Matrix product of size 2-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> operator*(Matrix<Element, 4, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> product(
Matrix<Element, 4, 4> const &rhs,
Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[0];
accum.data[5] += data[4] * rhs.data[1];
accum.data[6] += data[4] * rhs.data[2];
accum.data[7] += data[4] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
accum.data[6] += data[5] * rhs.data[6];
accum.data[7] += data[5] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[6] * rhs.data[8];
accum.data[5] += data[6] * rhs.data[9];
accum.data[6] += data[6] * rhs.data[10];
accum.data[7] += data[6] * rhs.data[11];
// k=3
accum.data[0] += data[3] * rhs.data[12];
accum.data[1] += data[3] * rhs.data[13];
accum.data[2] += data[3] * rhs.data[14];
accum.data[3] += data[3] * rhs.data[15];
accum.data[4] += data[7] * rhs.data[12];
accum.data[5] += data[7] * rhs.data[13];
accum.data[6] += data[7] * rhs.data[14];
accum.data[7] += data[7] * rhs.data[15];
return accum;
}
/// Matrix product of size 2-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> operator*(Matrix<Element, 4, 4> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) {
*this = product(rhs);
return *this;
}
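// Illustrative sketch (not part of the generated implementation): the optional accumulator
// argument of product() lets a caller express C = A * B + C in one call. Assumes the
// 4-by-4 specialization defined elsewhere in this header provides identity().
//
//   Matrix<float, 2, 4> a = Matrix<float, 2, 4>::ones();
//   Matrix<float, 4, 4> b = Matrix<float, 4, 4>::identity();
//   Matrix<float, 2, 4> c = Matrix<float, 2, 4>::uniform(2);
//   c = a.product(b, c);   // every element of c is now 1 * 1 + 2 = 3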
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[5];
return accum;
}
};
/// Template alias for 2-by-4 matrix
template <typename Element>
using Matrix2x4 = Matrix<Element, 2, 4>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix2x4<Element> make_Matrix2x4(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3
) {
return Matrix2x4<Element>(
_0_0, _0_1, _0_2, _0_3,
_1_0, _1_1, _1_2, _1_3
);
}
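// Illustrative usage sketch (assumes the 4-by-1 and 2-by-1 specializations defined
// elsewhere in this header provide the analogous element-wise constructors):
//
//   Matrix2x4<float> a = make_Matrix2x4<float>(
//     1, 2, 3, 4,
//     5, 6, 7, 8);
//   Matrix<float, 4, 1> x(1, 0, 0, 0);
//   Matrix<float, 2, 1> y = a * x;   // y = (1, 5)^T, i.e. the first column of a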
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 3-by-1 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 3, 1> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 3;
/// Number of columns in matrix
static int const kColumns = 1;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 3;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 3-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 3-by-1 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0,
Element _1_0,
Element _2_0
) {
data[0] = _0_0;
data[1] = _1_0;
data[2] = _2_0;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> transpose() const {
Matrix<Element, 1, 3> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
mt.data[2] = data[2];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
m.data[2] = data[i * 1 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
data[i * 1 + j + 2] = m.data[2];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> column(int j) const {
return slice_3x1(0, j);
}
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 3, 1> const &v, int j = 0) {
return set_slice_3x1(v, 0, j);
}
/// Concatenates this matrix with a 3-by-1 matrix to form a 3-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> hcat(Matrix<Element, 3, 1> const & rhs) const {
return Matrix<Element, 3, 2>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-2 matrix to form a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> hcat(Matrix<Element, 3, 2> const & rhs) const {
return Matrix<Element, 3, 3>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-3 matrix to form a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> hcat(Matrix<Element, 3, 3> const & rhs) const {
return Matrix<Element, 3, 4>::hcat(*this, rhs);
}
/// Forms a 3-by-1 matrix by vertically concatenating an Element with a 2-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Element upper, Matrix<Element, 2, 1> const & lower) {
return Matrix(
upper
, lower.at(0, 0)
, lower.at(1, 0));
}
/// Forms a 3-by-1 matrix by vertically concatenating a 2-by-1 matrix with an Element
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 1> const & upper, Element lower) {
return Matrix(
upper.at(0, 0)
, upper.at(1, 0)
, lower);
}
/// Concatenates this matrix with an Element to form a 4-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> vcat(Element rhs) const {
return Matrix<Element, 4, 1>::vcat(*this, rhs);
}
/// Elementwise add operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
return result;
}
/// Elementwise add operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
return *this;
}
/// Elementwise subtract operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
return result;
}
/// Elementwise subtract operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
return *this;
}
/// Elementwise multiply operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
return result;
}
/// Scalar multiply operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
return result;
}
/// Scalar multiply operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
return *this;
}
/// Elementwise divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
return result;
}
/// Scalar divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
return result;
}
/// Scalar divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
return *this;
}
/// Elementwise divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
return m;
}
/// Matrix product of size 3-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> product(
Matrix<Element, 1, 1> const &rhs,
Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[1] * rhs.data[0];
accum.data[2] += data[2] * rhs.data[0];
return accum;
}
/// Matrix product of size 3-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> operator*(Matrix<Element, 1, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 1, 1> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 3-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> product(
Matrix<Element, 1, 2> const &rhs,
Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[1] * rhs.data[0];
accum.data[3] += data[1] * rhs.data[1];
accum.data[4] += data[2] * rhs.data[0];
accum.data[5] += data[2] * rhs.data[1];
return accum;
}
/// Matrix product of size 3-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> operator*(Matrix<Element, 1, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> product(
Matrix<Element, 1, 3> const &rhs,
Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[1] * rhs.data[0];
accum.data[4] += data[1] * rhs.data[1];
accum.data[5] += data[1] * rhs.data[2];
accum.data[6] += data[2] * rhs.data[0];
accum.data[7] += data[2] * rhs.data[1];
accum.data[8] += data[2] * rhs.data[2];
return accum;
}
/// Matrix product of size 3-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> operator*(Matrix<Element, 1, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> product(
Matrix<Element, 1, 4> const &rhs,
Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[1] * rhs.data[0];
accum.data[5] += data[1] * rhs.data[1];
accum.data[6] += data[1] * rhs.data[2];
accum.data[7] += data[1] * rhs.data[3];
accum.data[8] += data[2] * rhs.data[0];
accum.data[9] += data[2] * rhs.data[1];
accum.data[10] += data[2] * rhs.data[2];
accum.data[11] += data[2] * rhs.data[3];
return accum;
}
/// Matrix product of size 3-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> operator*(Matrix<Element, 1, 4> const &rhs) const {
return product(rhs);
}
/// Dot product of vectors with extent 3
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 3, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
return accum;
}
/// Dot product of vectors with extent 3
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 3> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
/// Cross product
CUTLASS_HOST_DEVICE
Matrix cross(Matrix const &rhs) const {
return Matrix(
data[1] * rhs.data[2] - data[2] * rhs.data[1],
data[2] * rhs.data[0] - data[0] * rhs.data[2],
data[0] * rhs.data[1] - data[1] * rhs.data[0]
);
}
};
/// Template alias for 3-by-1 matrix
template <typename Element>
using Matrix3x1 = Matrix<Element, 3, 1>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix3x1<Element> make_Matrix3x1(
Element _0_0,
Element _1_0,
Element _2_0
) {
return Matrix3x1<Element>(
_0_0,
_1_0,
_2_0
);
}
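// Illustrative usage sketch for 3-vectors (dot and right-handed cross product):
//
//   Matrix3x1<float> a = make_Matrix3x1<float>(1, 0, 0);
//   Matrix3x1<float> b = make_Matrix3x1<float>(0, 1, 0);
//   float d = a.dot(b);                // 0
//   Matrix3x1<float> c = a.cross(b);   // (0, 0, 1)^T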
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 3-by-2 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 3, 2> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 3;
/// Number of columns in matrix
static int const kColumns = 2;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 6;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 3-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 3-by-2 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1,
Element _2_0, Element _2_1
) {
data[0] = _0_0; data[1] = _0_1;
data[2] = _1_0; data[3] = _1_1;
data[4] = _2_0; data[5] = _2_1;
}
/// Constructs a 3-by-2 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 2> const &row_0,
Matrix<Element, 1, 2> const &row_1,
Matrix<Element, 1, 2> const &row_2
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_1.data[0];
data[3] = row_1.data[1];
data[4] = row_2.data[0];
data[5] = row_2.data[1];
}
/// Static method to construct a 3-by-2 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 3, 1> const &column_0,
Matrix<Element, 3, 1> const &column_1
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_0.data[1];
result.data[3] = column_1.data[1];
result.data[4] = column_0.data[2];
result.data[5] = column_1.data[2];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[3] = diag.data[1];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[3] = diag.data[1];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> diagonal() const {
Matrix<Element, 2, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[3];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> transpose() const {
Matrix<Element, 2, 3> mt;
mt.data[0] = data[0];
mt.data[3] = data[1];
mt.data[1] = data[2];
mt.data[4] = data[3];
mt.data[2] = data[4];
mt.data[5] = data[5];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> row(int i) const {
return slice_1x2(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) {
return set_slice_1x2(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
m.data[2] = data[i * 2 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
data[i * 2 + j + 4] = m.data[2];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> column(int j) const {
return slice_3x1(0, j);
}
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 3, 1> const &v, int j = 0) {
return set_slice_3x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
m.data[4] = data[i * 2 + j + 4];
m.data[5] = data[i * 2 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
data[i * 2 + j + 4] = m.data[4];
data[i * 2 + j + 5] = m.data[5];
return *this;
}
/// Forms a 3-by-2 matrix by horizontally concatenating a 3-by-1 matrix with a 3-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 1> const & lhs, Matrix<Element, 3, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0)
, lhs.at(1, 0), rhs.at(1, 0)
, lhs.at(2, 0), rhs.at(2, 0));
}
/// Concatenates this matrix with a 3-by-1 matrix to form a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> hcat(Matrix<Element, 3, 1> const & rhs) const {
return Matrix<Element, 3, 3>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-2 matrix to form a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> hcat(Matrix<Element, 3, 2> const & rhs) const {
return Matrix<Element, 3, 4>::hcat(*this, rhs);
}
/// Forms a 3-by-2 matrix by vertically concatenating a 1-by-2 matrix with a 2-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 2> const & upper, Matrix<Element, 2, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, lower.at(0, 0), lower.at(0, 1)
, lower.at(1, 0), lower.at(1, 1));
}
/// Forms a 3-by-2 matrix by vertically concatenating a 2-by-2 matrix with a 1-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 2> const & upper, Matrix<Element, 1, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, upper.at(1, 0), upper.at(1, 1)
, lower.at(0, 0), lower.at(0, 1));
}
/// Concatenates this matrix with a 1-by-2 matrix to form a 4-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> vcat(Matrix<Element, 1, 2> const & rhs) const {
return Matrix<Element, 4, 2>::vcat(*this, rhs);
}
/// Forms a 3-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Element B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A, B
, C.at(0, 0), D.at(0, 0)
, C.at(1, 0), D.at(1, 0)
);
}
/// Forms a 3-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 1> const & B,
Element C, Element D) {
return Matrix(
A.at(0, 0), B.at(0, 0)
, A.at(1, 0), B.at(1, 0)
, C, D
);
}
/// Elementwise add operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
return result;
}
/// Elementwise add operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
return *this;
}
/// Elementwise subtract operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
return result;
}
/// Elementwise subtract operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
return *this;
}
/// Elementwise multiply operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
return result;
}
/// Scalar multiply operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
return result;
}
/// Scalar multiply operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
return *this;
}
/// Elementwise divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
return result;
}
/// Scalar divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
return result;
}
/// Scalar divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
return *this;
}
/// Elementwise divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
m.data[4] = -data[4];
m.data[5] = -data[5];
return m;
}
/// Matrix product of size 3-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> product(
Matrix<Element, 2, 1> const &rhs,
Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[2] * rhs.data[0];
accum.data[2] += data[4] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[3] * rhs.data[1];
accum.data[2] += data[5] * rhs.data[1];
return accum;
}
/// Matrix product of size 3-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> operator*(Matrix<Element, 2, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> product(
Matrix<Element, 2, 2> const &rhs,
Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[2] * rhs.data[0];
accum.data[3] += data[2] * rhs.data[1];
accum.data[4] += data[4] * rhs.data[0];
accum.data[5] += data[4] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[3] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[3];
accum.data[4] += data[5] * rhs.data[2];
accum.data[5] += data[5] * rhs.data[3];
return accum;
}
/// Matrix product of size 3-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> operator*(Matrix<Element, 2, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 3-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> product(
Matrix<Element, 2, 3> const &rhs,
Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[2] * rhs.data[0];
accum.data[4] += data[2] * rhs.data[1];
accum.data[5] += data[2] * rhs.data[2];
accum.data[6] += data[4] * rhs.data[0];
accum.data[7] += data[4] * rhs.data[1];
accum.data[8] += data[4] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[3] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
accum.data[6] += data[5] * rhs.data[3];
accum.data[7] += data[5] * rhs.data[4];
accum.data[8] += data[5] * rhs.data[5];
return accum;
}
/// Matrix product of size 3-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> operator*(Matrix<Element, 2, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> product(
Matrix<Element, 2, 4> const &rhs,
Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[2] * rhs.data[0];
accum.data[5] += data[2] * rhs.data[1];
accum.data[6] += data[2] * rhs.data[2];
accum.data[7] += data[2] * rhs.data[3];
accum.data[8] += data[4] * rhs.data[0];
accum.data[9] += data[4] * rhs.data[1];
accum.data[10] += data[4] * rhs.data[2];
accum.data[11] += data[4] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
accum.data[6] += data[3] * rhs.data[6];
accum.data[7] += data[3] * rhs.data[7];
accum.data[8] += data[5] * rhs.data[4];
accum.data[9] += data[5] * rhs.data[5];
accum.data[10] += data[5] * rhs.data[6];
accum.data[11] += data[5] * rhs.data[7];
return accum;
}
/// Matrix product of size 3-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> operator*(Matrix<Element, 2, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[3];
return accum;
}
};
/// Template alias for 3-by-2 matrix
template <typename Element>
using Matrix3x2 = Matrix<Element, 3, 2>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix3x2<Element> make_Matrix3x2(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1,
Element _2_0, Element _2_1
) {
return Matrix3x2<Element>(
_0_0, _0_1,
_1_0, _1_1,
_2_0, _2_1
);
}
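// Illustrative usage sketch (assumes the 2-by-1 specialization defined elsewhere in this
// header provides the analogous element-wise constructor):
//
//   Matrix3x2<float> a = make_Matrix3x2<float>(
//     1, 2,
//     3, 4,
//     5, 6);
//   Matrix<float, 2, 1> x(1, 1);
//   Matrix3x1<float> y = a * x;   // y = (3, 7, 11)^T, the sum of the two columns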
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 3-by-3 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 3, 3> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 3;
/// Number of columns in matrix
static int const kColumns = 3;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 9;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 3-by-3 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2,
Element _2_0, Element _2_1, Element _2_2
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2;
data[3] = _1_0; data[4] = _1_1; data[5] = _1_2;
data[6] = _2_0; data[7] = _2_1; data[8] = _2_2;
}
/// Constructs a 3-by-3 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 3> const &row_0,
Matrix<Element, 1, 3> const &row_1,
Matrix<Element, 1, 3> const &row_2
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_1.data[0];
data[4] = row_1.data[1];
data[5] = row_1.data[2];
data[6] = row_2.data[0];
data[7] = row_2.data[1];
data[8] = row_2.data[2];
}
/// Static method to construct a 3-by-3 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 3, 1> const &column_0,
Matrix<Element, 3, 1> const &column_1,
Matrix<Element, 3, 1> const &column_2
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_0.data[1];
result.data[4] = column_1.data[1];
result.data[5] = column_2.data[1];
result.data[6] = column_0.data[2];
result.data[7] = column_1.data[2];
result.data[8] = column_2.data[2];
return result;
}
/// Constructs an identity matrix
CUTLASS_HOST_DEVICE
static Matrix identity() {
Matrix m;
m.data[0] = Element(1);
m.data[4] = Element(1);
m.data[8] = Element(1);
return m;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
m.data[8] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 3, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[4] = diag.data[1];
m.data[8] = diag.data[2];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 3> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[4] = diag.data[1];
m.data[8] = diag.data[2];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> diagonal() const {
Matrix<Element, 3, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[4];
diag.data[2] = data[8];
return diag;
}
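// Illustrative sketch (not part of the generated implementation): from_diagonal() and
// diagonal() round-trip the diagonal entries.
//
//   Matrix<float, 3, 3> s = Matrix<float, 3, 3>::from_diagonal(make_Matrix3x1<float>(2, 3, 4));
//   Matrix<float, 3, 1> d = s.diagonal();   // d = (2, 3, 4)^T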
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> transpose() const {
Matrix<Element, 3, 3> mt;
mt.data[0] = data[0];
mt.data[3] = data[1];
mt.data[6] = data[2];
mt.data[1] = data[3];
mt.data[4] = data[4];
mt.data[7] = data[5];
mt.data[2] = data[6];
mt.data[5] = data[7];
mt.data[8] = data[8];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
return *this;
}
  /// Gets the row at index i as a 1-by-3 vector
  CUTLASS_HOST_DEVICE
  Matrix<Element, 1, 3> row(int i) const {
    return slice_1x3(i, 0);
  }
  /// Overwrites the row at index i
  CUTLASS_HOST_DEVICE
  Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) {
    return set_slice_1x3(v, i, 0);
  }
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
m.data[2] = data[i * 3 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
data[i * 3 + j + 6] = m.data[2];
return *this;
}
  /// Gets the column at index j as a 3-by-1 vector
  CUTLASS_HOST_DEVICE
  Matrix<Element, 3, 1> column(int j) const {
    return slice_3x1(0, j);
  }
  /// Overwrites the column at index j
  CUTLASS_HOST_DEVICE
  Matrix &set_column(Matrix<Element, 3, 1> const &v, int j = 0) {
    return set_slice_3x1(v, 0, j);
  }
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
m.data[4] = data[i * 3 + j + 6];
m.data[5] = data[i * 3 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
data[i * 3 + j + 6] = m.data[4];
data[i * 3 + j + 7] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const {
Matrix<Element, 3, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
m.data[6] = data[i * 3 + j + 6];
m.data[7] = data[i * 3 + j + 7];
m.data[8] = data[i * 3 + j + 8];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
data[i * 3 + j + 6] = m.data[6];
data[i * 3 + j + 7] = m.data[7];
data[i * 3 + j + 8] = m.data[8];
return *this;
}
/// Forms a 3-by-3 matrix by horizontally concatenating a 3-by-1 matrix with a 3-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 1> const & lhs, Matrix<Element, 3, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1)
, lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1));
}
/// Forms a 3-by-3 matrix by horizontally concatenating a 3-by-2 matrix with a 3-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 2> const & lhs, Matrix<Element, 3, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0)
, lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0));
}
  /// Concatenates this matrix with a 3-by-1 matrix to form a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> hcat(Matrix<Element, 3, 1> const & rhs) const {
return Matrix<Element, 3, 4>::hcat(*this, rhs);
}
/// Forms a 3-by-3 matrix by vertically concatenating a 1-by-3 matrix with a 2-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 3> const & upper, Matrix<Element, 2, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2));
}
/// Forms a 3-by-3 matrix by vertically concatenating a 2-by-3 matrix with a 1-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 3> const & upper, Matrix<Element, 1, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2));
}
  /// Concatenates this matrix with a 1-by-3 matrix to form a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> vcat(Matrix<Element, 1, 3> const & rhs) const {
return Matrix<Element, 4, 3>::vcat(*this, rhs);
}
/// Forms a 3-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 2> const & B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 2> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1)
, C.at(0, 0), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), D.at(1, 0), D.at(1, 1)
);
}
/// Forms a 3-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Element B,
Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B
, C.at(0, 0), C.at(0, 1), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), D.at(1, 0)
);
}
/// Forms a 3-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 2> const & B,
Element C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), B.at(1, 0), B.at(1, 1)
, C, D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 3-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 1> const & B,
Matrix<Element, 1, 2> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), B.at(1, 0)
, C.at(0, 0), C.at(0, 1), D
);
}
/// Elementwise add operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
result.data[8] = data[8] + rhs.data[8];
return result;
}
/// Elementwise add operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
data[8] += rhs.data[8];
return *this;
}
/// Elementwise subtract operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
result.data[8] = data[8] - rhs.data[8];
return result;
}
/// Elementwise subtract operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
data[8] -= rhs.data[8];
return *this;
}
/// Elementwise multiply operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
result.data[8] = data[8] * rhs.data[8];
return result;
}
/// Scalar multiply operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
result.data[8] = data[8] * s;
return result;
}
/// Scalar multiply operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
data[8] *= s;
return *this;
}
/// Elementwise divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
result.data[8] = data[8] / rhs.data[8];
return result;
}
/// Scalar divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
result.data[8] = data[8] / s;
return result;
}
/// Scalar divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
data[8] /= s;
return *this;
}
/// Elementwise divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
data[8] /= rhs.data[8];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
    m.data[0] = -data[0];
    m.data[1] = -data[1];
    m.data[2] = -data[2];
    m.data[3] = -data[3];
    m.data[4] = -data[4];
    m.data[5] = -data[5];
    m.data[6] = -data[6];
    m.data[7] = -data[7];
    m.data[8] = -data[8];
return m;
}
/// Matrix product of size 3-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> product(
Matrix<Element, 3, 1> const &rhs,
Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[3] * rhs.data[0];
accum.data[2] += data[6] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[4] * rhs.data[1];
accum.data[2] += data[7] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[5] * rhs.data[2];
accum.data[2] += data[8] * rhs.data[2];
return accum;
}
/// Matrix product of size 3-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> operator*(Matrix<Element, 3, 1> const &rhs) const {
return product(rhs);
}
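  //
  // Usage sketch (illustrative only; the element type and values below are
  // assumptions, not part of this header):
  //
  //   Matrix<float, 3, 3> A = Matrix<float, 3, 3>::identity();
  //   Matrix<float, 3, 1> x(1.0f, 2.0f, 3.0f);
  //   Matrix<float, 3, 1> y = A * x;   // matrix-vector product; y equals x here
  //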
/// Matrix product of size 3-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> product(
Matrix<Element, 3, 2> const &rhs,
Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[3] * rhs.data[0];
accum.data[3] += data[3] * rhs.data[1];
accum.data[4] += data[6] * rhs.data[0];
accum.data[5] += data[6] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[4] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[3];
accum.data[4] += data[7] * rhs.data[2];
accum.data[5] += data[7] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[5] * rhs.data[4];
accum.data[3] += data[5] * rhs.data[5];
accum.data[4] += data[8] * rhs.data[4];
accum.data[5] += data[8] * rhs.data[5];
return accum;
}
/// Matrix product of size 3-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> operator*(Matrix<Element, 3, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> product(
Matrix<Element, 3, 3> const &rhs,
Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[0];
accum.data[4] += data[3] * rhs.data[1];
accum.data[5] += data[3] * rhs.data[2];
accum.data[6] += data[6] * rhs.data[0];
accum.data[7] += data[6] * rhs.data[1];
accum.data[8] += data[6] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[4] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
accum.data[6] += data[7] * rhs.data[3];
accum.data[7] += data[7] * rhs.data[4];
accum.data[8] += data[7] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[5] * rhs.data[6];
accum.data[4] += data[5] * rhs.data[7];
accum.data[5] += data[5] * rhs.data[8];
accum.data[6] += data[8] * rhs.data[6];
accum.data[7] += data[8] * rhs.data[7];
accum.data[8] += data[8] * rhs.data[8];
return accum;
}
/// Matrix product of size 3-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> operator*(Matrix<Element, 3, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 3-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> product(
Matrix<Element, 3, 4> const &rhs,
Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[0];
accum.data[5] += data[3] * rhs.data[1];
accum.data[6] += data[3] * rhs.data[2];
accum.data[7] += data[3] * rhs.data[3];
accum.data[8] += data[6] * rhs.data[0];
accum.data[9] += data[6] * rhs.data[1];
accum.data[10] += data[6] * rhs.data[2];
accum.data[11] += data[6] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
accum.data[6] += data[4] * rhs.data[6];
accum.data[7] += data[4] * rhs.data[7];
accum.data[8] += data[7] * rhs.data[4];
accum.data[9] += data[7] * rhs.data[5];
accum.data[10] += data[7] * rhs.data[6];
accum.data[11] += data[7] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[5] * rhs.data[8];
accum.data[5] += data[5] * rhs.data[9];
accum.data[6] += data[5] * rhs.data[10];
accum.data[7] += data[5] * rhs.data[11];
accum.data[8] += data[8] * rhs.data[8];
accum.data[9] += data[8] * rhs.data[9];
accum.data[10] += data[8] * rhs.data[10];
accum.data[11] += data[8] * rhs.data[11];
return accum;
}
/// Matrix product of size 3-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> operator*(Matrix<Element, 3, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
accum += data[8];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
accum += data[8] * data[8];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[4];
accum += data[8];
return accum;
}
/// Returns 3-by-3 rotation matrix around the X axis
CUTLASS_HOST_DEVICE
static Matrix rotation_X(Element theta) {
Matrix m = identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(1, 1) = c;
m.at(1, 2) = -s;
m.at(2, 1) = s;
m.at(2, 2) = c;
return m;
}
/// Returns 3-by-3 rotation matrix around the Y axis
CUTLASS_HOST_DEVICE
static Matrix rotation_Y(Element theta) {
Matrix m = identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(0, 0) = c;
m.at(2, 0) = -s;
m.at(0, 2) = s;
m.at(2, 2) = c;
return m;
}
/// Returns 3-by-3 rotation matrix around the Z axis
CUTLASS_HOST_DEVICE
static Matrix rotation_Z(Element theta) {
Matrix m = Matrix::identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(0, 0) = c;
m.at(0, 1) = -s;
m.at(1, 0) = s;
m.at(1, 1) = c;
return m;
}
/// Returns a 3-by-3 rotation matrix around a unit-length axis
CUTLASS_HOST_DEVICE
static Matrix rotation(Element theta, Matrix<Element, 3, 1> const &u) {
Element x = u.data[0];
Element y = u.data[1];
Element z = u.data[2];
Element c = fast_cos(theta);
Element s = fast_sin(theta);
Element one_minus_cos = Element(1) - fast_cos(theta);
Matrix m;
    m.set_slice_3x3({
      c + x * x * one_minus_cos, x * y * one_minus_cos - z * s, x * z * one_minus_cos + y * s,
      y * x * one_minus_cos + z * s, c + y * y * one_minus_cos, y * z * one_minus_cos - x * s,
z * x * one_minus_cos - y * s, z * y * one_minus_cos + x * s, c + z * z * one_minus_cos
});
return m;
}
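  //
  // The element-by-element expression above is the standard Rodrigues rotation
  // formula for a unit-length axis u:
  //
  //   R = c * I + s * [u]_x + (1 - c) * u * u^T,   c = cos(theta), s = sin(theta),
  //
  // where [u]_x is the skew-symmetric cross-product matrix of u. Illustrative
  // usage sketch (element type and angle are assumptions):
  //
  //   Matrix<float, 3, 1> axis(0.0f, 0.0f, 1.0f);
  //   Matrix<float, 3, 3> R = Matrix<float, 3, 3>::rotation(0.5f, axis);   // same result as rotation_Z(0.5f)
  //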
/// Returns a 3-by-3 reflection about the plane specified by the
/// unit-length normal vector n_unit
CUTLASS_HOST_DEVICE
static Matrix reflection(Matrix<Element, 3, 1> const &n_unit) {
Element a = n_unit.data[0];
Element b = n_unit.data[1];
Element c = n_unit.data[2];
Matrix m = Matrix::identity();
    m.set_slice_3x3({
Element(1) - Element(2) * a * a, Element(-2) * a * b, Element(-2) * a * c,
Element(-2) * a * b, Element(1) - Element(2) * b * b, Element(-2) * b * c,
Element(-2) * a * c, Element(-2) * b * c, Element(1) - Element(2) * c * c
});
return m;
}
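  //
  // The matrix above is the Householder reflection R = I - 2 * n * n^T for a
  // unit-length normal n; multiplying a 3-by-1 vector by R mirrors it across
  // the plane through the origin whose normal is n.
  //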
/// Computes the determinant of a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Element determinant(Element accum = Element()) const {
accum += at(0, 0) * Matrix<Element, 2, 2>({ at(1, 1), at(1, 2), at(2, 1), at(2, 2) }).determinant();
accum -= at(0, 1) * Matrix<Element, 2, 2>({ at(1, 0), at(1, 2), at(2, 0), at(2, 2) }).determinant();
accum += at(0, 2) * Matrix<Element, 2, 2>({ at(1, 0), at(1, 1), at(2, 0), at(2, 1) }).determinant();
return accum;
}
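  //
  // determinant() uses cofactor expansion along the first row: each term is
  // at(0, j) times the determinant of the 2-by-2 minor obtained by deleting
  // row 0 and column j, with alternating signs.
  //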
/// Computes the inverse of a 3-by-3 matrix given
/// the matrix's determinant
CUTLASS_HOST_DEVICE
Matrix inverse(Element det) const {
return Matrix(
at(1, 1) * at(2, 2) - at(1, 2) * at(2, 1),
at(0, 2) * at(2, 1) - at(0, 1) * at(2, 2),
at(0, 1) * at(1, 2) - at(0, 2) * at(1, 1),
at(1, 2) * at(2, 0) - at(1, 0) * at(2, 2),
at(0, 0) * at(2, 2) - at(0, 2) * at(2, 0),
at(0, 2) * at(1, 0) - at(0, 0) * at(1, 2),
at(1, 0) * at(2, 1) - at(1, 1) * at(2, 0),
at(0, 1) * at(2, 0) - at(0, 0) * at(2, 1),
at(0, 0) * at(1, 1) - at(0, 1) * at(1, 0)
) * (Element(1) / det);
}
/// Computes the inverse of a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix inverse() const {
return inverse(determinant());
}
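  //
  // inverse() scales the adjugate (transposed cofactor matrix) by 1 / det and
  // is only meaningful when determinant() is nonzero. Usage sketch (element
  // type and values are illustrative assumptions):
  //
  //   Matrix<double, 3, 3> A(2, 0, 0,
  //                          0, 3, 0,
  //                          0, 0, 4);
  //   Matrix<double, 3, 3> I_approx = A * A.inverse();   // approximately the identity
  //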
};
/// Template alias for 3-by-3 matrix
template <typename Element>
using Matrix3x3 = Matrix<Element, 3, 3>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix3x3<Element> make_Matrix3x3(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2,
Element _2_0, Element _2_1, Element _2_2
) {
return Matrix3x3<Element>(
_0_0, _0_1, _0_2,
_1_0, _1_1, _1_2,
_2_0, _2_1, _2_2
);
}
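//
// Usage sketch for the helper above (element type and values are illustrative
// assumptions); the element type is deduced from the arguments:
//
//   auto M = make_Matrix3x3(
//     1.0f, 0.0f, 0.0f,
//     0.0f, 1.0f, 0.0f,
//     0.0f, 0.0f, 1.0f);   // M is a Matrix3x3<float>
//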
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 3-by-4 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 3, 4> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 3;
/// Number of columns in matrix
static int const kColumns = 4;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 12;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
  /// Constructs a 3-by-4 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3,
Element _2_0, Element _2_1, Element _2_2, Element _2_3
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3;
data[4] = _1_0; data[5] = _1_1; data[6] = _1_2; data[7] = _1_3;
data[8] = _2_0; data[9] = _2_1; data[10] = _2_2; data[11] = _2_3;
}
  /// Constructs a 3-by-4 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 4> const &row_0,
Matrix<Element, 1, 4> const &row_1,
Matrix<Element, 1, 4> const &row_2
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_0.data[3];
data[4] = row_1.data[0];
data[5] = row_1.data[1];
data[6] = row_1.data[2];
data[7] = row_1.data[3];
data[8] = row_2.data[0];
data[9] = row_2.data[1];
data[10] = row_2.data[2];
data[11] = row_2.data[3];
}
/// Static method to construct a 3-by-4 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 4, 1> const &column_0,
Matrix<Element, 4, 1> const &column_1,
Matrix<Element, 4, 1> const &column_2,
Matrix<Element, 4, 1> const &column_3
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_3.data[0];
result.data[4] = column_0.data[1];
result.data[5] = column_1.data[1];
result.data[6] = column_2.data[1];
result.data[7] = column_3.data[1];
result.data[8] = column_0.data[2];
result.data[9] = column_1.data[2];
result.data[10] = column_2.data[2];
result.data[11] = column_3.data[2];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
m.data[8] = s;
m.data[9] = s;
m.data[10] = s;
m.data[11] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 3, 1> const &diag) {
Matrix m;
    m.data[0] = diag.data[0];
    m.data[5] = diag.data[1];
    m.data[10] = diag.data[2];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 3> const &diag) {
Matrix m;
    m.data[0] = diag.data[0];
    m.data[5] = diag.data[1];
    m.data[10] = diag.data[2];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> diagonal() const {
Matrix<Element, 3, 1> diag;
    diag.data[0] = data[0];
    diag.data[1] = data[5];
    diag.data[2] = data[10];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> transpose() const {
Matrix<Element, 4, 3> mt;
mt.data[0] = data[0];
mt.data[3] = data[1];
mt.data[6] = data[2];
mt.data[9] = data[3];
mt.data[1] = data[4];
mt.data[4] = data[5];
mt.data[7] = data[6];
mt.data[10] = data[7];
mt.data[2] = data[8];
mt.data[5] = data[9];
mt.data[8] = data[10];
mt.data[11] = data[11];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
    return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
    return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const {
Matrix<Element, 1, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
return *this;
}
  /// Gets the row at index i as a 1-by-4 vector
  CUTLASS_HOST_DEVICE
  Matrix<Element, 1, 4> row(int i) const {
    return slice_1x4(i, 0);
  }
  /// Overwrites the row at index i
  CUTLASS_HOST_DEVICE
  Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) {
    return set_slice_1x4(v, i, 0);
  }
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> slice_2x4(int i = 0, int j = 0) const {
Matrix<Element, 2, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x4(Matrix<Element, 2, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
m.data[2] = data[i * 4 + j + 8];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
data[i * 4 + j + 8] = m.data[2];
return *this;
}
  /// Gets the column at index j as a 3-by-1 vector
  CUTLASS_HOST_DEVICE
  Matrix<Element, 3, 1> column(int j) const {
    return slice_3x1(0, j);
  }
  /// Overwrites the column at index j
  CUTLASS_HOST_DEVICE
  Matrix &set_column(Matrix<Element, 3, 1> const &v, int j = 0) {
    return set_slice_3x1(v, 0, j);
  }
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
m.data[4] = data[i * 4 + j + 8];
m.data[5] = data[i * 4 + j + 9];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
data[i * 4 + j + 8] = m.data[4];
data[i * 4 + j + 9] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const {
Matrix<Element, 3, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
m.data[6] = data[i * 4 + j + 8];
m.data[7] = data[i * 4 + j + 9];
m.data[8] = data[i * 4 + j + 10];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
data[i * 4 + j + 8] = m.data[6];
data[i * 4 + j + 9] = m.data[7];
data[i * 4 + j + 10] = m.data[8];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> slice_3x4(int i = 0, int j = 0) const {
Matrix<Element, 3, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
m.data[8] = data[i * 4 + j + 8];
m.data[9] = data[i * 4 + j + 9];
m.data[10] = data[i * 4 + j + 10];
m.data[11] = data[i * 4 + j + 11];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x4(Matrix<Element, 3, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
data[i * 4 + j + 8] = m.data[8];
data[i * 4 + j + 9] = m.data[9];
data[i * 4 + j + 10] = m.data[10];
data[i * 4 + j + 11] = m.data[11];
return *this;
}
/// Forms a 3-by-4 matrix by horizontally concatenating a 3-by-1 matrix with a 3-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 1> const & lhs, Matrix<Element, 3, 3> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1), rhs.at(1, 2)
, lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1), rhs.at(2, 2));
}
/// Forms a 3-by-4 matrix by horizontally concatenating a 3-by-2 matrix with a 3-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 2> const & lhs, Matrix<Element, 3, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0), rhs.at(1, 1)
, lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0), rhs.at(2, 1));
}
/// Forms a 3-by-4 matrix by horizontally concatenating a 3-by-3 matrix with a 3-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 3> const & lhs, Matrix<Element, 3, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), lhs.at(1, 2), rhs.at(1, 0)
, lhs.at(2, 0), lhs.at(2, 1), lhs.at(2, 2), rhs.at(2, 0));
}
/// Forms a 3-by-4 matrix by vertically concatenating a 1-by-4 matrix with a 2-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 4> const & upper, Matrix<Element, 2, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2), lower.at(1, 3));
}
/// Forms a 3-by-4 matrix by vertically concatenating a 2-by-4 matrix with a 1-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 4> const & upper, Matrix<Element, 1, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2), upper.at(1, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3));
}
  /// Concatenates this matrix with a 1-by-4 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> vcat(Matrix<Element, 1, 4> const & rhs) const {
return Matrix<Element, 4, 4>::vcat(*this, rhs);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 3> const & B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 3> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1), B.at(0, 2)
, C.at(0, 0), D.at(0, 0), D.at(0, 1), D.at(0, 2)
, C.at(1, 0), D.at(1, 0), D.at(1, 1), D.at(1, 2)
);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Matrix<Element, 1, 2> const & B,
Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), C.at(1, 1), D.at(1, 0), D.at(1, 1)
);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 3> const & A, Element B,
Matrix<Element, 2, 3> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), C.at(1, 2), D.at(1, 0)
);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 3> const & B,
Element C, Matrix<Element, 1, 3> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1), B.at(0, 2)
, A.at(1, 0), B.at(1, 0), B.at(1, 1), B.at(1, 2)
, C, D.at(0, 0), D.at(0, 1), D.at(0, 2)
);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 2> const & B,
Matrix<Element, 1, 2> const & C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), A.at(1, 1), B.at(1, 0), B.at(1, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 3> const & A, Matrix<Element, 2, 1> const & B,
Matrix<Element, 1, 3> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), A.at(1, 2), B.at(1, 0)
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D
);
}
/// Elementwise add operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
result.data[8] = data[8] + rhs.data[8];
result.data[9] = data[9] + rhs.data[9];
result.data[10] = data[10] + rhs.data[10];
result.data[11] = data[11] + rhs.data[11];
return result;
}
/// Elementwise add operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
data[8] += rhs.data[8];
data[9] += rhs.data[9];
data[10] += rhs.data[10];
data[11] += rhs.data[11];
return *this;
}
/// Elementwise subtract operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
result.data[8] = data[8] - rhs.data[8];
result.data[9] = data[9] - rhs.data[9];
result.data[10] = data[10] - rhs.data[10];
result.data[11] = data[11] - rhs.data[11];
return result;
}
/// Elementwise subtract operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
data[8] -= rhs.data[8];
data[9] -= rhs.data[9];
data[10] -= rhs.data[10];
data[11] -= rhs.data[11];
return *this;
}
/// Elementwise multiply operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
result.data[8] = data[8] * rhs.data[8];
result.data[9] = data[9] * rhs.data[9];
result.data[10] = data[10] * rhs.data[10];
result.data[11] = data[11] * rhs.data[11];
return result;
}
/// Scalar multiply operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
result.data[8] = data[8] * s;
result.data[9] = data[9] * s;
result.data[10] = data[10] * s;
result.data[11] = data[11] * s;
return result;
}
/// Scalar multiply operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
data[8] *= s;
data[9] *= s;
data[10] *= s;
data[11] *= s;
return *this;
}
/// Elementwise divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
result.data[8] = data[8] / rhs.data[8];
result.data[9] = data[9] / rhs.data[9];
result.data[10] = data[10] / rhs.data[10];
result.data[11] = data[11] / rhs.data[11];
return result;
}
/// Scalar divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
result.data[8] = data[8] / s;
result.data[9] = data[9] / s;
result.data[10] = data[10] / s;
result.data[11] = data[11] / s;
return result;
}
/// Scalar divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
data[8] /= s;
data[9] /= s;
data[10] /= s;
data[11] /= s;
return *this;
}
/// Elementwise divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
data[8] /= rhs.data[8];
data[9] /= rhs.data[9];
data[10] /= rhs.data[10];
data[11] /= rhs.data[11];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
    m.data[0] = -data[0];
    m.data[1] = -data[1];
    m.data[2] = -data[2];
    m.data[3] = -data[3];
    m.data[4] = -data[4];
    m.data[5] = -data[5];
    m.data[6] = -data[6];
    m.data[7] = -data[7];
    m.data[8] = -data[8];
    m.data[9] = -data[9];
    m.data[10] = -data[10];
    m.data[11] = -data[11];
return m;
}
/// Matrix product of size 3-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> product(
Matrix<Element, 4, 1> const &rhs,
Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[4] * rhs.data[0];
accum.data[2] += data[8] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[5] * rhs.data[1];
accum.data[2] += data[9] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[6] * rhs.data[2];
accum.data[2] += data[10] * rhs.data[2];
// k=3
accum.data[0] += data[3] * rhs.data[3];
accum.data[1] += data[7] * rhs.data[3];
accum.data[2] += data[11] * rhs.data[3];
return accum;
}
/// Matrix product of size 3-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> operator*(Matrix<Element, 4, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> product(
Matrix<Element, 4, 2> const &rhs,
Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[4] * rhs.data[0];
accum.data[3] += data[4] * rhs.data[1];
accum.data[4] += data[8] * rhs.data[0];
accum.data[5] += data[8] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[5] * rhs.data[2];
accum.data[3] += data[5] * rhs.data[3];
accum.data[4] += data[9] * rhs.data[2];
accum.data[5] += data[9] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[6] * rhs.data[4];
accum.data[3] += data[6] * rhs.data[5];
accum.data[4] += data[10] * rhs.data[4];
accum.data[5] += data[10] * rhs.data[5];
// k=3
accum.data[0] += data[3] * rhs.data[6];
accum.data[1] += data[3] * rhs.data[7];
accum.data[2] += data[7] * rhs.data[6];
accum.data[3] += data[7] * rhs.data[7];
accum.data[4] += data[11] * rhs.data[6];
accum.data[5] += data[11] * rhs.data[7];
return accum;
}
/// Matrix product of size 3-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> operator*(Matrix<Element, 4, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> product(
Matrix<Element, 4, 3> const &rhs,
Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[0];
accum.data[4] += data[4] * rhs.data[1];
accum.data[5] += data[4] * rhs.data[2];
accum.data[6] += data[8] * rhs.data[0];
accum.data[7] += data[8] * rhs.data[1];
accum.data[8] += data[8] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[5] * rhs.data[3];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
accum.data[6] += data[9] * rhs.data[3];
accum.data[7] += data[9] * rhs.data[4];
accum.data[8] += data[9] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[6] * rhs.data[6];
accum.data[4] += data[6] * rhs.data[7];
accum.data[5] += data[6] * rhs.data[8];
accum.data[6] += data[10] * rhs.data[6];
accum.data[7] += data[10] * rhs.data[7];
accum.data[8] += data[10] * rhs.data[8];
// k=3
accum.data[0] += data[3] * rhs.data[9];
accum.data[1] += data[3] * rhs.data[10];
accum.data[2] += data[3] * rhs.data[11];
accum.data[3] += data[7] * rhs.data[9];
accum.data[4] += data[7] * rhs.data[10];
accum.data[5] += data[7] * rhs.data[11];
accum.data[6] += data[11] * rhs.data[9];
accum.data[7] += data[11] * rhs.data[10];
accum.data[8] += data[11] * rhs.data[11];
return accum;
}
/// Matrix product of size 3-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> operator*(Matrix<Element, 4, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> product(
Matrix<Element, 4, 4> const &rhs,
Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[0];
accum.data[5] += data[4] * rhs.data[1];
accum.data[6] += data[4] * rhs.data[2];
accum.data[7] += data[4] * rhs.data[3];
accum.data[8] += data[8] * rhs.data[0];
accum.data[9] += data[8] * rhs.data[1];
accum.data[10] += data[8] * rhs.data[2];
accum.data[11] += data[8] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
accum.data[6] += data[5] * rhs.data[6];
accum.data[7] += data[5] * rhs.data[7];
accum.data[8] += data[9] * rhs.data[4];
accum.data[9] += data[9] * rhs.data[5];
accum.data[10] += data[9] * rhs.data[6];
accum.data[11] += data[9] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[6] * rhs.data[8];
accum.data[5] += data[6] * rhs.data[9];
accum.data[6] += data[6] * rhs.data[10];
accum.data[7] += data[6] * rhs.data[11];
accum.data[8] += data[10] * rhs.data[8];
accum.data[9] += data[10] * rhs.data[9];
accum.data[10] += data[10] * rhs.data[10];
accum.data[11] += data[10] * rhs.data[11];
// k=3
accum.data[0] += data[3] * rhs.data[12];
accum.data[1] += data[3] * rhs.data[13];
accum.data[2] += data[3] * rhs.data[14];
accum.data[3] += data[3] * rhs.data[15];
accum.data[4] += data[7] * rhs.data[12];
accum.data[5] += data[7] * rhs.data[13];
accum.data[6] += data[7] * rhs.data[14];
accum.data[7] += data[7] * rhs.data[15];
accum.data[8] += data[11] * rhs.data[12];
accum.data[9] += data[11] * rhs.data[13];
accum.data[10] += data[11] * rhs.data[14];
accum.data[11] += data[11] * rhs.data[15];
return accum;
}
/// Matrix product of size 3-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> operator*(Matrix<Element, 4, 4> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) {
*this = product(rhs);
return *this;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
accum += data[8];
accum += data[9];
accum += data[10];
accum += data[11];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
accum += data[8] * data[8];
accum += data[9] * data[9];
accum += data[10] * data[10];
accum += data[11] * data[11];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[5];
accum += data[10];
return accum;
}
};
/// Template alias for 3-by-4 matrix
template <typename Element>
using Matrix3x4 = Matrix<Element, 3, 4>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix3x4<Element> make_Matrix3x4(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3,
Element _2_0, Element _2_1, Element _2_2, Element _2_3
) {
return Matrix3x4<Element>(
_0_0, _0_1, _0_2, _0_3,
_1_0, _1_1, _1_2, _1_3,
_2_0, _2_1, _2_2, _2_3
);
}
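//
// Usage sketch: a 3-by-4 matrix is a convenient container for an affine
// transform [R | t]; multiplying it by a homogeneous 4-by-1 point applies the
// linear part and the translation in a single product. The element type and
// values below are illustrative assumptions:
//
//   Matrix3x4<float> T = Matrix3x4<float>::hcat(
//       Matrix<float, 3, 3>::identity(), Matrix<float, 3, 1>(1.0f, 2.0f, 3.0f));
//   Matrix<float, 4, 1> p(0.5f, 0.5f, 0.5f, 1.0f);
//   Matrix<float, 3, 1> q = T * p;   // q = R * (x, y, z) + t
//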
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 4-by-1 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 4, 1> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 4;
/// Number of columns in matrix
static int const kColumns = 1;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 4;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 4-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
  /// Constructs a 4-by-1 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0,
Element _1_0,
Element _2_0,
Element _3_0
) {
data[0] = _0_0;
data[1] = _1_0;
data[2] = _2_0;
data[3] = _3_0;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> transpose() const {
Matrix<Element, 1, 4> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
mt.data[2] = data[2];
mt.data[3] = data[3];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
    return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
    return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
m.data[2] = data[i * 1 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
data[i * 1 + j + 2] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const {
Matrix<Element, 4, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
m.data[2] = data[i * 1 + j + 2];
m.data[3] = data[i * 1 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
data[i * 1 + j + 2] = m.data[2];
data[i * 1 + j + 3] = m.data[3];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> column(int j) const {
return slice_4x1(0, j);
}
CUTLASS_HOST_DEVICE
  Matrix &set_column(Matrix<Element, 4, 1> const &v, int j = 0) {
return set_slice_4x1(v, 0, j);
}
  /// Concatenates this matrix with a 4-by-1 matrix to form a 4-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> hcat(Matrix<Element, 4, 1> const & rhs) const {
return Matrix<Element, 4, 2>::hcat(*this, rhs);
}
  /// Concatenates this matrix with a 4-by-2 matrix to form a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> hcat(Matrix<Element, 4, 2> const & rhs) const {
return Matrix<Element, 4, 3>::hcat(*this, rhs);
}
  /// Concatenates this matrix with a 4-by-3 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> hcat(Matrix<Element, 4, 3> const & rhs) const {
return Matrix<Element, 4, 4>::hcat(*this, rhs);
}
/// Forms a 4-by-1 matrix by vertically concatenating an Element with a 3-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Element upper, Matrix<Element, 3, 1> const & lower) {
return Matrix(
upper
, lower.at(0, 0)
, lower.at(1, 0)
, lower.at(2, 0));
}
/// Forms a 4-by-1 matrix by vertically concatenating a 2-by-1 matrix with a 2-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 1> const & upper, Matrix<Element, 2, 1> const & lower) {
return Matrix(
upper.at(0, 0)
, upper.at(1, 0)
, lower.at(0, 0)
, lower.at(1, 0));
}
/// Forms a 4-by-1 matrix by vertically concatenating a 3-by-1 matrix with an Element
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 3, 1> const & upper, Element lower) {
return Matrix(
upper.at(0, 0)
, upper.at(1, 0)
, upper.at(2, 0)
, lower);
}
/// Elementwise add operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
return result;
}
/// Elementwise add operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
return *this;
}
/// Elementwise subtract operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
return result;
}
/// Elementwise subtract operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
return *this;
}
/// Elementwise multiply operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
return result;
}
/// Scalar multiply operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
return result;
}
/// Scalar multiply operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
return *this;
}
/// Elementwise divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
return result;
}
/// Scalar divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
return result;
}
/// Scalar divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
return *this;
}
/// Elementwise divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
    m.data[0] = -data[0];
    m.data[1] = -data[1];
    m.data[2] = -data[2];
    m.data[3] = -data[3];
return m;
}
/// Matrix product of size 4-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> product(
Matrix<Element, 1, 1> const &rhs,
Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[1] * rhs.data[0];
accum.data[2] += data[2] * rhs.data[0];
accum.data[3] += data[3] * rhs.data[0];
return accum;
}
/// Matrix product of size 4-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> operator*(Matrix<Element, 1, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 1, 1> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 4-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> product(
Matrix<Element, 1, 2> const &rhs,
Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[1] * rhs.data[0];
accum.data[3] += data[1] * rhs.data[1];
accum.data[4] += data[2] * rhs.data[0];
accum.data[5] += data[2] * rhs.data[1];
accum.data[6] += data[3] * rhs.data[0];
accum.data[7] += data[3] * rhs.data[1];
return accum;
}
/// Matrix product of size 4-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> operator*(Matrix<Element, 1, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> product(
Matrix<Element, 1, 3> const &rhs,
Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[1] * rhs.data[0];
accum.data[4] += data[1] * rhs.data[1];
accum.data[5] += data[1] * rhs.data[2];
accum.data[6] += data[2] * rhs.data[0];
accum.data[7] += data[2] * rhs.data[1];
accum.data[8] += data[2] * rhs.data[2];
accum.data[9] += data[3] * rhs.data[0];
accum.data[10] += data[3] * rhs.data[1];
accum.data[11] += data[3] * rhs.data[2];
return accum;
}
/// Matrix product of size 4-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> operator*(Matrix<Element, 1, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> product(
Matrix<Element, 1, 4> const &rhs,
Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[1] * rhs.data[0];
accum.data[5] += data[1] * rhs.data[1];
accum.data[6] += data[1] * rhs.data[2];
accum.data[7] += data[1] * rhs.data[3];
accum.data[8] += data[2] * rhs.data[0];
accum.data[9] += data[2] * rhs.data[1];
accum.data[10] += data[2] * rhs.data[2];
accum.data[11] += data[2] * rhs.data[3];
accum.data[12] += data[3] * rhs.data[0];
accum.data[13] += data[3] * rhs.data[1];
accum.data[14] += data[3] * rhs.data[2];
accum.data[15] += data[3] * rhs.data[3];
return accum;
}
/// Matrix product of size 4-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> operator*(Matrix<Element, 1, 4> const &rhs) const {
return product(rhs);
}
/// Dot product of vectors with extent 4
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 4, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
accum += data[3] * rhs.data[3];
return accum;
}
/// Dot product of vectors with extent 4
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 4> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
accum += data[3] * rhs.data[3];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
};
/// Template alias for 4-by-1 matrix
template <typename Element>
using Matrix4x1 = Matrix<Element, 4, 1>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix4x1<Element> make_Matrix4x1(
Element _0_0,
Element _1_0,
Element _2_0,
Element _3_0
) {
return Matrix4x1<Element>(
_0_0,
_1_0,
_2_0,
_3_0
);
}
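// Illustrative usage sketch (not part of the original header; values are
// arbitrary assumptions). The 4-by-1 specialization acts as a column vector,
// so the dot product and the 4-by-1 times 1-by-4 outer product defined above
// apply directly.
//
//   Matrix4x1<float> v = make_Matrix4x1(1.0f, 2.0f, 3.0f, 4.0f);
//   Matrix4x1<float> w = make_Matrix4x1(4.0f, 3.0f, 2.0f, 1.0f);
//
//   float d = v.dot(w);                              // inner product
//   Matrix<float, 4, 4> outer = v * w.transpose();   // outer product (4-by-4-by-1)
//   Matrix4x1<float> u = (v + w) * 0.5f;             // elementwise average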
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 4-by-2 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 4, 2> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 4;
/// Number of columns in matrix
static int const kColumns = 2;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 8;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 4-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
  /// Constructs a 4-by-2 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1,
Element _2_0, Element _2_1,
Element _3_0, Element _3_1
) {
data[0] = _0_0; data[1] = _0_1;
data[2] = _1_0; data[3] = _1_1;
data[4] = _2_0; data[5] = _2_1;
data[6] = _3_0; data[7] = _3_1;
}
  /// Constructs a 4-by-2 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 2> const &row_0,
Matrix<Element, 1, 2> const &row_1,
Matrix<Element, 1, 2> const &row_2,
Matrix<Element, 1, 2> const &row_3
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_1.data[0];
data[3] = row_1.data[1];
data[4] = row_2.data[0];
data[5] = row_2.data[1];
data[6] = row_3.data[0];
data[7] = row_3.data[1];
}
/// Static method to construct a 4-by-2 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
    Matrix<Element, 4, 1> const &column_0,
    Matrix<Element, 4, 1> const &column_1
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_0.data[1];
result.data[3] = column_1.data[1];
result.data[4] = column_0.data[2];
result.data[5] = column_1.data[2];
result.data[6] = column_0.data[3];
result.data[7] = column_1.data[3];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
return m;
}
  /// Constructs a matrix with every element equal to one
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
  /// Constructs a matrix with every element equal to zero
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
Matrix m;
    m.data[0] = diag.data[0];
    m.data[3] = diag.data[1];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
Matrix m;
    m.data[0] = diag.data[0];
    m.data[3] = diag.data[1];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> diagonal() const {
Matrix<Element, 2, 1> diag;
    diag.data[0] = data[0];
    diag.data[1] = data[3];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> transpose() const {
Matrix<Element, 2, 4> mt;
mt.data[0] = data[0];
mt.data[4] = data[1];
mt.data[1] = data[2];
mt.data[5] = data[3];
mt.data[2] = data[4];
mt.data[6] = data[5];
mt.data[3] = data[6];
mt.data[7] = data[7];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
    return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
    return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> row(int i) const {
return slice_1x2(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) {
return set_slice_1x2(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
m.data[2] = data[i * 2 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
data[i * 2 + j + 4] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
m.data[4] = data[i * 2 + j + 4];
m.data[5] = data[i * 2 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
data[i * 2 + j + 4] = m.data[4];
data[i * 2 + j + 5] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const {
Matrix<Element, 4, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
m.data[2] = data[i * 2 + j + 4];
m.data[3] = data[i * 2 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
data[i * 2 + j + 4] = m.data[2];
data[i * 2 + j + 6] = m.data[3];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> column(int j) const {
return slice_4x1(0, j);
}
CUTLASS_HOST_DEVICE
  Matrix &set_column(Matrix<Element, 4, 1> const &v, int j = 0) {
return set_slice_4x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> slice_4x2(int i = 0, int j = 0) const {
Matrix<Element, 4, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
m.data[4] = data[i * 2 + j + 4];
m.data[5] = data[i * 2 + j + 5];
m.data[6] = data[i * 2 + j + 6];
m.data[7] = data[i * 2 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x2(Matrix<Element, 4, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
data[i * 2 + j + 4] = m.data[4];
data[i * 2 + j + 5] = m.data[5];
data[i * 2 + j + 6] = m.data[6];
data[i * 2 + j + 7] = m.data[7];
return *this;
}
/// Forms a 4-by-2 matrix by horizontally concatenating a 4-by-1 matrix with a 4-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 1> const & lhs, Matrix<Element, 4, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0)
, lhs.at(1, 0), rhs.at(1, 0)
, lhs.at(2, 0), rhs.at(2, 0)
, lhs.at(3, 0), rhs.at(3, 0));
}
  /// Concatenates this matrix with a 4-by-1 matrix to form a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> hcat(Matrix<Element, 4, 1> const & rhs) const {
return Matrix<Element, 4, 3>::hcat(*this, rhs);
}
  /// Concatenates this matrix with a 4-by-2 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> hcat(Matrix<Element, 4, 2> const & rhs) const {
return Matrix<Element, 4, 4>::hcat(*this, rhs);
}
/// Forms a 4-by-2 matrix by vertically concatenating a 1-by-2 matrix with a 3-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 2> const & upper, Matrix<Element, 3, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, lower.at(0, 0), lower.at(0, 1)
, lower.at(1, 0), lower.at(1, 1)
, lower.at(2, 0), lower.at(2, 1));
}
/// Forms a 4-by-2 matrix by vertically concatenating a 2-by-2 matrix with a 2-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 2> const & upper, Matrix<Element, 2, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, upper.at(1, 0), upper.at(1, 1)
, lower.at(0, 0), lower.at(0, 1)
, lower.at(1, 0), lower.at(1, 1));
}
/// Forms a 4-by-2 matrix by vertically concatenating a 3-by-2 matrix with a 1-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 3, 2> const & upper, Matrix<Element, 1, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, upper.at(1, 0), upper.at(1, 1)
, upper.at(2, 0), upper.at(2, 1)
, lower.at(0, 0), lower.at(0, 1));
}
/// Forms a 4-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Element B,
Matrix<Element, 3, 1> const & C, Matrix<Element, 3, 1> const & D) {
return Matrix(
A, B
, C.at(0, 0), D.at(0, 0)
, C.at(1, 0), D.at(1, 0)
, C.at(2, 0), D.at(2, 0)
);
}
/// Forms a 4-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 1> const & B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0)
, A.at(1, 0), B.at(1, 0)
, C.at(0, 0), D.at(0, 0)
, C.at(1, 0), D.at(1, 0)
);
}
/// Forms a 4-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 1> const & A, Matrix<Element, 3, 1> const & B,
Element C, Element D) {
return Matrix(
A.at(0, 0), B.at(0, 0)
, A.at(1, 0), B.at(1, 0)
, A.at(2, 0), B.at(2, 0)
, C, D
);
}
/// Elementwise add operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
return result;
}
/// Elementwise add operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
return *this;
}
/// Elementwise subtract operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
return result;
}
/// Elementwise subtract operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
return *this;
}
/// Elementwise multiply operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
return result;
}
/// Scalar multiply operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
return result;
}
/// Scalar multiply operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
return *this;
}
/// Elementwise divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
return result;
}
/// Scalar divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
return result;
}
/// Scalar divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
return *this;
}
/// Elementwise divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
    m.data[0] = -data[0];
    m.data[1] = -data[1];
    m.data[2] = -data[2];
    m.data[3] = -data[3];
    m.data[4] = -data[4];
    m.data[5] = -data[5];
    m.data[6] = -data[6];
    m.data[7] = -data[7];
return m;
}
/// Matrix product of size 4-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> product(
Matrix<Element, 2, 1> const &rhs,
Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[2] * rhs.data[0];
accum.data[2] += data[4] * rhs.data[0];
accum.data[3] += data[6] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[3] * rhs.data[1];
accum.data[2] += data[5] * rhs.data[1];
accum.data[3] += data[7] * rhs.data[1];
return accum;
}
/// Matrix product of size 4-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> operator*(Matrix<Element, 2, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> product(
Matrix<Element, 2, 2> const &rhs,
Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[2] * rhs.data[0];
accum.data[3] += data[2] * rhs.data[1];
accum.data[4] += data[4] * rhs.data[0];
accum.data[5] += data[4] * rhs.data[1];
accum.data[6] += data[6] * rhs.data[0];
accum.data[7] += data[6] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[3] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[3];
accum.data[4] += data[5] * rhs.data[2];
accum.data[5] += data[5] * rhs.data[3];
accum.data[6] += data[7] * rhs.data[2];
accum.data[7] += data[7] * rhs.data[3];
return accum;
}
/// Matrix product of size 4-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> operator*(Matrix<Element, 2, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 4-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> product(
Matrix<Element, 2, 3> const &rhs,
Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[2] * rhs.data[0];
accum.data[4] += data[2] * rhs.data[1];
accum.data[5] += data[2] * rhs.data[2];
accum.data[6] += data[4] * rhs.data[0];
accum.data[7] += data[4] * rhs.data[1];
accum.data[8] += data[4] * rhs.data[2];
accum.data[9] += data[6] * rhs.data[0];
accum.data[10] += data[6] * rhs.data[1];
accum.data[11] += data[6] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[3] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
accum.data[6] += data[5] * rhs.data[3];
accum.data[7] += data[5] * rhs.data[4];
accum.data[8] += data[5] * rhs.data[5];
accum.data[9] += data[7] * rhs.data[3];
accum.data[10] += data[7] * rhs.data[4];
accum.data[11] += data[7] * rhs.data[5];
return accum;
}
/// Matrix product of size 4-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> operator*(Matrix<Element, 2, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> product(
Matrix<Element, 2, 4> const &rhs,
Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[2] * rhs.data[0];
accum.data[5] += data[2] * rhs.data[1];
accum.data[6] += data[2] * rhs.data[2];
accum.data[7] += data[2] * rhs.data[3];
accum.data[8] += data[4] * rhs.data[0];
accum.data[9] += data[4] * rhs.data[1];
accum.data[10] += data[4] * rhs.data[2];
accum.data[11] += data[4] * rhs.data[3];
accum.data[12] += data[6] * rhs.data[0];
accum.data[13] += data[6] * rhs.data[1];
accum.data[14] += data[6] * rhs.data[2];
accum.data[15] += data[6] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
accum.data[6] += data[3] * rhs.data[6];
accum.data[7] += data[3] * rhs.data[7];
accum.data[8] += data[5] * rhs.data[4];
accum.data[9] += data[5] * rhs.data[5];
accum.data[10] += data[5] * rhs.data[6];
accum.data[11] += data[5] * rhs.data[7];
accum.data[12] += data[7] * rhs.data[4];
accum.data[13] += data[7] * rhs.data[5];
accum.data[14] += data[7] * rhs.data[6];
accum.data[15] += data[7] * rhs.data[7];
return accum;
}
/// Matrix product of size 4-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> operator*(Matrix<Element, 2, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[3];
return accum;
}
};
/// Template alias for 4-by-2 matrix
template <typename Element>
using Matrix4x2 = Matrix<Element, 4, 2>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix4x2<Element> make_Matrix4x2(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1,
Element _2_0, Element _2_1,
Element _3_0, Element _3_1
) {
return Matrix4x2<Element>(
_0_0, _0_1,
_1_0, _1_1,
_2_0, _2_1,
_3_0, _3_1
);
}
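// Illustrative usage sketch (not part of the original header; values are
// arbitrary assumptions). A 4-by-2 matrix is assembled from two 4-by-1 columns
// and then used with the slicing, product, and trace members defined above.
//
//   Matrix4x1<float> c0 = make_Matrix4x1(1.0f, 2.0f, 3.0f, 4.0f);
//   Matrix4x1<float> c1 = make_Matrix4x1(5.0f, 6.0f, 7.0f, 8.0f);
//
//   Matrix4x2<float> B = Matrix4x2<float>::hcat(c0, c1);   // or from_columns(c0, c1)
//   Matrix<float, 2, 1> x = B.slice_2x1(0, 0);             // top 2-by-1 block of column 0
//   Matrix4x1<float> y = B * x;                            // matrix-vector product (4-by-1-by-2)
//   float tr = B.trace();                                  // B(0,0) + B(1,1)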
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 4-by-3 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 4, 3> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 4;
/// Number of columns in matrix
static int const kColumns = 3;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 12;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
  /// Constructs a 4-by-3 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2,
Element _2_0, Element _2_1, Element _2_2,
Element _3_0, Element _3_1, Element _3_2
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2;
data[3] = _1_0; data[4] = _1_1; data[5] = _1_2;
data[6] = _2_0; data[7] = _2_1; data[8] = _2_2;
data[9] = _3_0; data[10] = _3_1; data[11] = _3_2;
}
  /// Constructs a 4-by-3 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 3> const &row_0,
Matrix<Element, 1, 3> const &row_1,
Matrix<Element, 1, 3> const &row_2,
Matrix<Element, 1, 3> const &row_3
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_1.data[0];
data[4] = row_1.data[1];
data[5] = row_1.data[2];
data[6] = row_2.data[0];
data[7] = row_2.data[1];
data[8] = row_2.data[2];
data[9] = row_3.data[0];
data[10] = row_3.data[1];
data[11] = row_3.data[2];
}
/// Static method to construct a 4-by-3 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
    Matrix<Element, 4, 1> const &column_0,
    Matrix<Element, 4, 1> const &column_1,
    Matrix<Element, 4, 1> const &column_2
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_0.data[1];
result.data[4] = column_1.data[1];
result.data[5] = column_2.data[1];
result.data[6] = column_0.data[2];
result.data[7] = column_1.data[2];
result.data[8] = column_2.data[2];
result.data[9] = column_0.data[3];
result.data[10] = column_1.data[3];
result.data[11] = column_2.data[3];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
m.data[8] = s;
m.data[9] = s;
m.data[10] = s;
m.data[11] = s;
return m;
}
  /// Constructs a matrix with every element equal to one
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
  /// Constructs a matrix with every element equal to zero
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 3, 1> const &diag) {
Matrix m;
    m.data[0] = diag.data[0];
    m.data[4] = diag.data[1];
    m.data[8] = diag.data[2];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 3> const &diag) {
Matrix m;
    m.data[0] = diag.data[0];
    m.data[4] = diag.data[1];
    m.data[8] = diag.data[2];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> diagonal() const {
Matrix<Element, 3, 1> diag;
    diag.data[0] = data[0];
    diag.data[1] = data[4];
    diag.data[2] = data[8];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> transpose() const {
Matrix<Element, 3, 4> mt;
mt.data[0] = data[0];
mt.data[4] = data[1];
mt.data[8] = data[2];
mt.data[1] = data[3];
mt.data[5] = data[4];
mt.data[9] = data[5];
mt.data[2] = data[6];
mt.data[6] = data[7];
mt.data[10] = data[8];
mt.data[3] = data[9];
mt.data[7] = data[10];
mt.data[11] = data[11];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
    return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
    return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> row(int i) const {
return slice_1x3(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) {
return set_slice_1x3(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
m.data[2] = data[i * 3 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
data[i * 3 + j + 6] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
m.data[4] = data[i * 3 + j + 6];
m.data[5] = data[i * 3 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
data[i * 3 + j + 6] = m.data[4];
data[i * 3 + j + 7] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const {
Matrix<Element, 3, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
m.data[6] = data[i * 3 + j + 6];
m.data[7] = data[i * 3 + j + 7];
m.data[8] = data[i * 3 + j + 8];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
data[i * 3 + j + 6] = m.data[6];
data[i * 3 + j + 7] = m.data[7];
data[i * 3 + j + 8] = m.data[8];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const {
Matrix<Element, 4, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
m.data[2] = data[i * 3 + j + 6];
m.data[3] = data[i * 3 + j + 9];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
data[i * 3 + j + 6] = m.data[2];
data[i * 3 + j + 9] = m.data[3];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> column(int j) const {
return slice_4x1(0, j);
}
CUTLASS_HOST_DEVICE
  Matrix &set_column(Matrix<Element, 4, 1> const &v, int j = 0) {
return set_slice_4x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> slice_4x2(int i = 0, int j = 0) const {
Matrix<Element, 4, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
m.data[4] = data[i * 3 + j + 6];
m.data[5] = data[i * 3 + j + 7];
m.data[6] = data[i * 3 + j + 9];
m.data[7] = data[i * 3 + j + 10];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x2(Matrix<Element, 4, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
data[i * 3 + j + 6] = m.data[4];
data[i * 3 + j + 7] = m.data[5];
data[i * 3 + j + 9] = m.data[6];
data[i * 3 + j + 10] = m.data[7];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> slice_4x3(int i = 0, int j = 0) const {
Matrix<Element, 4, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
m.data[6] = data[i * 3 + j + 6];
m.data[7] = data[i * 3 + j + 7];
m.data[8] = data[i * 3 + j + 8];
m.data[9] = data[i * 3 + j + 9];
m.data[10] = data[i * 3 + j + 10];
m.data[11] = data[i * 3 + j + 11];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x3(Matrix<Element, 4, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
data[i * 3 + j + 6] = m.data[6];
data[i * 3 + j + 7] = m.data[7];
data[i * 3 + j + 8] = m.data[8];
data[i * 3 + j + 9] = m.data[9];
data[i * 3 + j + 10] = m.data[10];
data[i * 3 + j + 11] = m.data[11];
return *this;
}
/// Forms a 4-by-3 matrix by horizontally concatenating a 4-by-1 matrix with a 4-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 1> const & lhs, Matrix<Element, 4, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1)
, lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1)
, lhs.at(3, 0), rhs.at(3, 0), rhs.at(3, 1));
}
/// Forms a 4-by-3 matrix by horizontally concatenating a 4-by-2 matrix with a 4-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 2> const & lhs, Matrix<Element, 4, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0)
, lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0)
, lhs.at(3, 0), lhs.at(3, 1), rhs.at(3, 0));
}
  /// Concatenates this matrix with a 4-by-1 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> hcat(Matrix<Element, 4, 1> const & rhs) const {
return Matrix<Element, 4, 4>::hcat(*this, rhs);
}
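  // Illustrative sketch (not part of the original header; values are arbitrary
  // assumptions): a 4-by-3 matrix built by horizontally concatenating a 4-by-1
  // column with a 4-by-2 matrix, as provided by the hcat() overloads above.
  //
  //   Matrix4x1<float> c0 = make_Matrix4x1(1.0f, 0.0f, 0.0f, 0.0f);
  //   Matrix4x1<float> c1 = make_Matrix4x1(0.0f, 1.0f, 0.0f, 0.0f);
  //   Matrix4x1<float> c2 = make_Matrix4x1(0.0f, 0.0f, 1.0f, 0.0f);
  //
  //   Matrix<float, 4, 3> E =
  //     Matrix<float, 4, 3>::hcat(c0, Matrix4x2<float>::hcat(c1, c2));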
/// Forms a 4-by-3 matrix by vertically concatenating a 1-by-3 matrix with a 3-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 3> const & upper, Matrix<Element, 3, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2)
, lower.at(2, 0), lower.at(2, 1), lower.at(2, 2));
}
/// Forms a 4-by-3 matrix by vertically concatenating a 2-by-3 matrix with a 2-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 3> const & upper, Matrix<Element, 2, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2));
}
/// Forms a 4-by-3 matrix by vertically concatenating a 3-by-3 matrix with a 1-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 3, 3> const & upper, Matrix<Element, 1, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2)
, upper.at(2, 0), upper.at(2, 1), upper.at(2, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2));
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 2> const & B,
Matrix<Element, 3, 1> const & C, Matrix<Element, 3, 2> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1)
, C.at(0, 0), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), D.at(1, 0), D.at(1, 1)
, C.at(2, 0), D.at(2, 0), D.at(2, 1)
);
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Element B,
Matrix<Element, 3, 2> const & C, Matrix<Element, 3, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B
, C.at(0, 0), C.at(0, 1), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), D.at(1, 0)
, C.at(2, 0), C.at(2, 1), D.at(2, 0)
);
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 2> const & B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 2> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), B.at(1, 0), B.at(1, 1)
, C.at(0, 0), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), D.at(1, 0), D.at(1, 1)
);
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 1> const & B,
Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), B.at(1, 0)
, C.at(0, 0), C.at(0, 1), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), D.at(1, 0)
);
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 1> const & A, Matrix<Element, 3, 2> const & B,
Element C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), B.at(1, 0), B.at(1, 1)
, A.at(2, 0), B.at(2, 0), B.at(2, 1)
, C, D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 2> const & A, Matrix<Element, 3, 1> const & B,
Matrix<Element, 1, 2> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), B.at(1, 0)
, A.at(2, 0), A.at(2, 1), B.at(2, 0)
, C.at(0, 0), C.at(0, 1), D
);
}
/// Elementwise add operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
result.data[8] = data[8] + rhs.data[8];
result.data[9] = data[9] + rhs.data[9];
result.data[10] = data[10] + rhs.data[10];
result.data[11] = data[11] + rhs.data[11];
return result;
}
/// Elementwise add operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
data[8] += rhs.data[8];
data[9] += rhs.data[9];
data[10] += rhs.data[10];
data[11] += rhs.data[11];
return *this;
}
/// Elementwise subtract operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
result.data[8] = data[8] - rhs.data[8];
result.data[9] = data[9] - rhs.data[9];
result.data[10] = data[10] - rhs.data[10];
result.data[11] = data[11] - rhs.data[11];
return result;
}
/// Elementwise subtract operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
data[8] -= rhs.data[8];
data[9] -= rhs.data[9];
data[10] -= rhs.data[10];
data[11] -= rhs.data[11];
return *this;
}
/// Elementwise multiply operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
result.data[8] = data[8] * rhs.data[8];
result.data[9] = data[9] * rhs.data[9];
result.data[10] = data[10] * rhs.data[10];
result.data[11] = data[11] * rhs.data[11];
return result;
}
/// Scalar multiply operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
result.data[8] = data[8] * s;
result.data[9] = data[9] * s;
result.data[10] = data[10] * s;
result.data[11] = data[11] * s;
return result;
}
/// Scalar multiply operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
data[8] *= s;
data[9] *= s;
data[10] *= s;
data[11] *= s;
return *this;
}
/// Elementwise divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
result.data[8] = data[8] / rhs.data[8];
result.data[9] = data[9] / rhs.data[9];
result.data[10] = data[10] / rhs.data[10];
result.data[11] = data[11] / rhs.data[11];
return result;
}
/// Scalar divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
result.data[8] = data[8] / s;
result.data[9] = data[9] / s;
result.data[10] = data[10] / s;
result.data[11] = data[11] / s;
return result;
}
/// Scalar divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
data[8] /= s;
data[9] /= s;
data[10] /= s;
data[11] /= s;
return *this;
}
/// Elementwise divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
data[8] /= rhs.data[8];
data[9] /= rhs.data[9];
data[10] /= rhs.data[10];
data[11] /= rhs.data[11];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
    m.data[0] = -data[0];
    m.data[1] = -data[1];
    m.data[2] = -data[2];
    m.data[3] = -data[3];
    m.data[4] = -data[4];
    m.data[5] = -data[5];
    m.data[6] = -data[6];
    m.data[7] = -data[7];
    m.data[8] = -data[8];
    m.data[9] = -data[9];
    m.data[10] = -data[10];
    m.data[11] = -data[11];
return m;
}
/// Matrix product of size 4-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> product(
Matrix<Element, 3, 1> const &rhs,
Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[3] * rhs.data[0];
accum.data[2] += data[6] * rhs.data[0];
accum.data[3] += data[9] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[4] * rhs.data[1];
accum.data[2] += data[7] * rhs.data[1];
accum.data[3] += data[10] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[5] * rhs.data[2];
accum.data[2] += data[8] * rhs.data[2];
accum.data[3] += data[11] * rhs.data[2];
return accum;
}
/// Matrix product of size 4-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> operator*(Matrix<Element, 3, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> product(
Matrix<Element, 3, 2> const &rhs,
Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[3] * rhs.data[0];
accum.data[3] += data[3] * rhs.data[1];
accum.data[4] += data[6] * rhs.data[0];
accum.data[5] += data[6] * rhs.data[1];
accum.data[6] += data[9] * rhs.data[0];
accum.data[7] += data[9] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[4] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[3];
accum.data[4] += data[7] * rhs.data[2];
accum.data[5] += data[7] * rhs.data[3];
accum.data[6] += data[10] * rhs.data[2];
accum.data[7] += data[10] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[5] * rhs.data[4];
accum.data[3] += data[5] * rhs.data[5];
accum.data[4] += data[8] * rhs.data[4];
accum.data[5] += data[8] * rhs.data[5];
accum.data[6] += data[11] * rhs.data[4];
accum.data[7] += data[11] * rhs.data[5];
return accum;
}
/// Matrix product of size 4-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> operator*(Matrix<Element, 3, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> product(
Matrix<Element, 3, 3> const &rhs,
Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[0];
accum.data[4] += data[3] * rhs.data[1];
accum.data[5] += data[3] * rhs.data[2];
accum.data[6] += data[6] * rhs.data[0];
accum.data[7] += data[6] * rhs.data[1];
accum.data[8] += data[6] * rhs.data[2];
accum.data[9] += data[9] * rhs.data[0];
accum.data[10] += data[9] * rhs.data[1];
accum.data[11] += data[9] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[4] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
accum.data[6] += data[7] * rhs.data[3];
accum.data[7] += data[7] * rhs.data[4];
accum.data[8] += data[7] * rhs.data[5];
accum.data[9] += data[10] * rhs.data[3];
accum.data[10] += data[10] * rhs.data[4];
accum.data[11] += data[10] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[5] * rhs.data[6];
accum.data[4] += data[5] * rhs.data[7];
accum.data[5] += data[5] * rhs.data[8];
accum.data[6] += data[8] * rhs.data[6];
accum.data[7] += data[8] * rhs.data[7];
accum.data[8] += data[8] * rhs.data[8];
accum.data[9] += data[11] * rhs.data[6];
accum.data[10] += data[11] * rhs.data[7];
accum.data[11] += data[11] * rhs.data[8];
return accum;
}
/// Matrix product of size 4-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> operator*(Matrix<Element, 3, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 4-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> product(
Matrix<Element, 3, 4> const &rhs,
Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[0];
accum.data[5] += data[3] * rhs.data[1];
accum.data[6] += data[3] * rhs.data[2];
accum.data[7] += data[3] * rhs.data[3];
accum.data[8] += data[6] * rhs.data[0];
accum.data[9] += data[6] * rhs.data[1];
accum.data[10] += data[6] * rhs.data[2];
accum.data[11] += data[6] * rhs.data[3];
accum.data[12] += data[9] * rhs.data[0];
accum.data[13] += data[9] * rhs.data[1];
accum.data[14] += data[9] * rhs.data[2];
accum.data[15] += data[9] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
accum.data[6] += data[4] * rhs.data[6];
accum.data[7] += data[4] * rhs.data[7];
accum.data[8] += data[7] * rhs.data[4];
accum.data[9] += data[7] * rhs.data[5];
accum.data[10] += data[7] * rhs.data[6];
accum.data[11] += data[7] * rhs.data[7];
accum.data[12] += data[10] * rhs.data[4];
accum.data[13] += data[10] * rhs.data[5];
accum.data[14] += data[10] * rhs.data[6];
accum.data[15] += data[10] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[5] * rhs.data[8];
accum.data[5] += data[5] * rhs.data[9];
accum.data[6] += data[5] * rhs.data[10];
accum.data[7] += data[5] * rhs.data[11];
accum.data[8] += data[8] * rhs.data[8];
accum.data[9] += data[8] * rhs.data[9];
accum.data[10] += data[8] * rhs.data[10];
accum.data[11] += data[8] * rhs.data[11];
accum.data[12] += data[11] * rhs.data[8];
accum.data[13] += data[11] * rhs.data[9];
accum.data[14] += data[11] * rhs.data[10];
accum.data[15] += data[11] * rhs.data[11];
return accum;
}
/// Matrix product of size 4-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> operator*(Matrix<Element, 3, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
accum += data[8];
accum += data[9];
accum += data[10];
accum += data[11];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
accum += data[8] * data[8];
accum += data[9] * data[9];
accum += data[10] * data[10];
accum += data[11] * data[11];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[4];
accum += data[8];
return accum;
}
};
/// Template alias for 4-by-3 matrix
template <typename Element>
using Matrix4x3 = Matrix<Element, 4, 3>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix4x3<Element> make_Matrix4x3(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2,
Element _2_0, Element _2_1, Element _2_2,
Element _3_0, Element _3_1, Element _3_2
) {
return Matrix4x3<Element>(
_0_0, _0_1, _0_2,
_1_0, _1_1, _1_2,
_2_0, _2_1, _2_2,
_3_0, _3_1, _3_2
);
}
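// Minimal usage sketch of the 4-by-3 specialization (illustrative only; the float element
// type and the values below are assumptions, and the 3-by-1 and 4-by-1 scalar constructors
// are the ones declared earlier in this header):
//
//   Matrix4x3<float> A = make_Matrix4x3<float>(
//       1.0f, 0.0f, 0.0f,
//       0.0f, 1.0f, 0.0f,
//       0.0f, 0.0f, 1.0f,
//       0.0f, 0.0f, 0.0f);
//   Matrix<float, 3, 1> x(1.0f, 2.0f, 3.0f);
//   Matrix<float, 4, 1> y = A * x;   // 4-by-1-by-3 product defined above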
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 4-by-4 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 4, 4> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 4;
/// Number of columns in matrix
static int const kColumns = 4;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 16;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 4-by-4 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3,
Element _2_0, Element _2_1, Element _2_2, Element _2_3,
Element _3_0, Element _3_1, Element _3_2, Element _3_3
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3;
data[4] = _1_0; data[5] = _1_1; data[6] = _1_2; data[7] = _1_3;
data[8] = _2_0; data[9] = _2_1; data[10] = _2_2; data[11] = _2_3;
data[12] = _3_0; data[13] = _3_1; data[14] = _3_2; data[15] = _3_3;
}
/// Constructs a 4-by-4 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 4> const &row_0,
Matrix<Element, 1, 4> const &row_1,
Matrix<Element, 1, 4> const &row_2,
Matrix<Element, 1, 4> const &row_3
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_0.data[3];
data[4] = row_1.data[0];
data[5] = row_1.data[1];
data[6] = row_1.data[2];
data[7] = row_1.data[3];
data[8] = row_2.data[0];
data[9] = row_2.data[1];
data[10] = row_2.data[2];
data[11] = row_2.data[3];
data[12] = row_3.data[0];
data[13] = row_3.data[1];
data[14] = row_3.data[2];
data[15] = row_3.data[3];
}
/// Static method to construct a 4-by-4 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 4, 1> const &column_0,
Matrix<Element, 4, 1> const &column_1,
Matrix<Element, 4, 1> const &column_2,
Matrix<Element, 4, 1> const &column_3
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_3.data[0];
result.data[4] = column_0.data[1];
result.data[5] = column_1.data[1];
result.data[6] = column_2.data[1];
result.data[7] = column_3.data[1];
result.data[8] = column_0.data[2];
result.data[9] = column_1.data[2];
result.data[10] = column_2.data[2];
result.data[11] = column_3.data[2];
result.data[12] = column_0.data[3];
result.data[13] = column_1.data[3];
result.data[14] = column_2.data[3];
result.data[15] = column_3.data[3];
return result;
}
/// Constructs an identity matrix
CUTLASS_HOST_DEVICE
static Matrix identity() {
Matrix m;
m.data[0] = Element(1);
m.data[5] = Element(1);
m.data[10] = Element(1);
m.data[15] = Element(1);
return m;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
m.data[8] = s;
m.data[9] = s;
m.data[10] = s;
m.data[11] = s;
m.data[12] = s;
m.data[13] = s;
m.data[14] = s;
m.data[15] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 4, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[5] = diag.data[1];
m.data[10] = diag.data[2];
m.data[15] = diag.data[3];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 4> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[5] = diag.data[1];
m.data[10] = diag.data[2];
m.data[15] = diag.data[3];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> diagonal() const {
Matrix<Element, 4, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[5];
diag.data[2] = data[10];
diag.data[3] = data[15];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> transpose() const {
Matrix<Element, 4, 4> mt;
mt.data[0] = data[0];
mt.data[4] = data[1];
mt.data[8] = data[2];
mt.data[12] = data[3];
mt.data[1] = data[4];
mt.data[5] = data[5];
mt.data[9] = data[6];
mt.data[13] = data[7];
mt.data[2] = data[8];
mt.data[6] = data[9];
mt.data[10] = data[10];
mt.data[14] = data[11];
mt.data[3] = data[12];
mt.data[7] = data[13];
mt.data[11] = data[14];
mt.data[15] = data[15];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
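  // Note on indexing: storage is row-major, so element (i, j) of this 4-by-4 matrix is
  // data[i * 4 + j]. For example (illustrative only; float element type is an assumption):
  //
  //   Matrix<float, 4, 4> m = Matrix<float, 4, 4>::identity();
  //   float d = m.at(2, 3);    // same storage location as m.data[2 * 4 + 3] == m.data[11]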
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const {
Matrix<Element, 1, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
return *this;
}
  /// Gets row i as a 1-by-4 matrix
  CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> row(int i) const {
return slice_1x4(i, 0);
}
  /// Overwrites row i with a 1-by-4 matrix
  CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) {
return set_slice_1x4(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> slice_2x4(int i = 0, int j = 0) const {
Matrix<Element, 2, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x4(Matrix<Element, 2, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
m.data[2] = data[i * 4 + j + 8];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
data[i * 4 + j + 8] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
m.data[4] = data[i * 4 + j + 8];
m.data[5] = data[i * 4 + j + 9];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
data[i * 4 + j + 8] = m.data[4];
data[i * 4 + j + 9] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const {
Matrix<Element, 3, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
m.data[6] = data[i * 4 + j + 8];
m.data[7] = data[i * 4 + j + 9];
m.data[8] = data[i * 4 + j + 10];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
data[i * 4 + j + 8] = m.data[6];
data[i * 4 + j + 9] = m.data[7];
data[i * 4 + j + 10] = m.data[8];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> slice_3x4(int i = 0, int j = 0) const {
Matrix<Element, 3, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
m.data[8] = data[i * 4 + j + 8];
m.data[9] = data[i * 4 + j + 9];
m.data[10] = data[i * 4 + j + 10];
m.data[11] = data[i * 4 + j + 11];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x4(Matrix<Element, 3, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
data[i * 4 + j + 8] = m.data[8];
data[i * 4 + j + 9] = m.data[9];
data[i * 4 + j + 10] = m.data[10];
data[i * 4 + j + 11] = m.data[11];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const {
Matrix<Element, 4, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
m.data[2] = data[i * 4 + j + 8];
m.data[3] = data[i * 4 + j + 12];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
data[i * 4 + j + 8] = m.data[2];
data[i * 4 + j + 12] = m.data[3];
return *this;
}
  /// Gets column j as a 4-by-1 matrix
  CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> column(int j) const {
return slice_4x1(0, j);
}
  /// Overwrites column j with a 4-by-1 matrix
  CUTLASS_HOST_DEVICE
  Matrix &set_column(Matrix<Element, 4, 1> const &v, int j = 0) {
return set_slice_4x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> slice_4x2(int i = 0, int j = 0) const {
Matrix<Element, 4, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
m.data[4] = data[i * 4 + j + 8];
m.data[5] = data[i * 4 + j + 9];
m.data[6] = data[i * 4 + j + 12];
m.data[7] = data[i * 4 + j + 13];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x2(Matrix<Element, 4, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
data[i * 4 + j + 8] = m.data[4];
data[i * 4 + j + 9] = m.data[5];
data[i * 4 + j + 12] = m.data[6];
data[i * 4 + j + 13] = m.data[7];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> slice_4x3(int i = 0, int j = 0) const {
Matrix<Element, 4, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
m.data[6] = data[i * 4 + j + 8];
m.data[7] = data[i * 4 + j + 9];
m.data[8] = data[i * 4 + j + 10];
m.data[9] = data[i * 4 + j + 12];
m.data[10] = data[i * 4 + j + 13];
m.data[11] = data[i * 4 + j + 14];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x3(Matrix<Element, 4, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
data[i * 4 + j + 8] = m.data[6];
data[i * 4 + j + 9] = m.data[7];
data[i * 4 + j + 10] = m.data[8];
data[i * 4 + j + 12] = m.data[9];
data[i * 4 + j + 13] = m.data[10];
data[i * 4 + j + 14] = m.data[11];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> slice_4x4(int i = 0, int j = 0) const {
Matrix<Element, 4, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
m.data[8] = data[i * 4 + j + 8];
m.data[9] = data[i * 4 + j + 9];
m.data[10] = data[i * 4 + j + 10];
m.data[11] = data[i * 4 + j + 11];
m.data[12] = data[i * 4 + j + 12];
m.data[13] = data[i * 4 + j + 13];
m.data[14] = data[i * 4 + j + 14];
m.data[15] = data[i * 4 + j + 15];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x4(Matrix<Element, 4, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
data[i * 4 + j + 8] = m.data[8];
data[i * 4 + j + 9] = m.data[9];
data[i * 4 + j + 10] = m.data[10];
data[i * 4 + j + 11] = m.data[11];
data[i * 4 + j + 12] = m.data[12];
data[i * 4 + j + 13] = m.data[13];
data[i * 4 + j + 14] = m.data[14];
data[i * 4 + j + 15] = m.data[15];
return *this;
}
/// Forms a 4-by-4 matrix by horizontally concatenating a 4-by-1 matrix with a 4-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 1> const & lhs, Matrix<Element, 4, 3> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1), rhs.at(1, 2)
, lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1), rhs.at(2, 2)
, lhs.at(3, 0), rhs.at(3, 0), rhs.at(3, 1), rhs.at(3, 2));
}
/// Forms a 4-by-4 matrix by horizontally concatenating a 4-by-2 matrix with a 4-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 2> const & lhs, Matrix<Element, 4, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0), rhs.at(1, 1)
, lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0), rhs.at(2, 1)
, lhs.at(3, 0), lhs.at(3, 1), rhs.at(3, 0), rhs.at(3, 1));
}
/// Forms a 4-by-4 matrix by horizontally concatenating a 4-by-3 matrix with a 4-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 3> const & lhs, Matrix<Element, 4, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), lhs.at(1, 2), rhs.at(1, 0)
, lhs.at(2, 0), lhs.at(2, 1), lhs.at(2, 2), rhs.at(2, 0)
, lhs.at(3, 0), lhs.at(3, 1), lhs.at(3, 2), rhs.at(3, 0));
}
/// Forms a 4-by-4 matrix by vertically concatenating a 1-by-4 matrix with a 3-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 4> const & upper, Matrix<Element, 3, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2), lower.at(1, 3)
, lower.at(2, 0), lower.at(2, 1), lower.at(2, 2), lower.at(2, 3));
}
/// Forms a 4-by-4 matrix by vertically concatenating a 2-by-4 matrix with a 2-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 4> const & upper, Matrix<Element, 2, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2), upper.at(1, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2), lower.at(1, 3));
}
/// Forms a 4-by-4 matrix by vertically concatenating a 3-by-4 matrix with a 1-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 3, 4> const & upper, Matrix<Element, 1, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2), upper.at(1, 3)
, upper.at(2, 0), upper.at(2, 1), upper.at(2, 2), upper.at(2, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3));
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 3> const & B,
Matrix<Element, 3, 1> const & C, Matrix<Element, 3, 3> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1), B.at(0, 2)
, C.at(0, 0), D.at(0, 0), D.at(0, 1), D.at(0, 2)
, C.at(1, 0), D.at(1, 0), D.at(1, 1), D.at(1, 2)
, C.at(2, 0), D.at(2, 0), D.at(2, 1), D.at(2, 2)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Matrix<Element, 1, 2> const & B,
Matrix<Element, 3, 2> const & C, Matrix<Element, 3, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), C.at(1, 1), D.at(1, 0), D.at(1, 1)
, C.at(2, 0), C.at(2, 1), D.at(2, 0), D.at(2, 1)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 3> const & A, Element B,
Matrix<Element, 3, 3> const & C, Matrix<Element, 3, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), C.at(1, 2), D.at(1, 0)
, C.at(2, 0), C.at(2, 1), C.at(2, 2), D.at(2, 0)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 3> const & B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 3> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1), B.at(0, 2)
, A.at(1, 0), B.at(1, 0), B.at(1, 1), B.at(1, 2)
, C.at(0, 0), D.at(0, 0), D.at(0, 1), D.at(0, 2)
, C.at(1, 0), D.at(1, 0), D.at(1, 1), D.at(1, 2)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 2> const & B,
Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), A.at(1, 1), B.at(1, 0), B.at(1, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), C.at(1, 1), D.at(1, 0), D.at(1, 1)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 3> const & A, Matrix<Element, 2, 1> const & B,
Matrix<Element, 2, 3> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), A.at(1, 2), B.at(1, 0)
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), C.at(1, 2), D.at(1, 0)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 1> const & A, Matrix<Element, 3, 3> const & B,
Element C, Matrix<Element, 1, 3> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1), B.at(0, 2)
, A.at(1, 0), B.at(1, 0), B.at(1, 1), B.at(1, 2)
, A.at(2, 0), B.at(2, 0), B.at(2, 1), B.at(2, 2)
, C, D.at(0, 0), D.at(0, 1), D.at(0, 2)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 2> const & A, Matrix<Element, 3, 2> const & B,
Matrix<Element, 1, 2> const & C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), A.at(1, 1), B.at(1, 0), B.at(1, 1)
, A.at(2, 0), A.at(2, 1), B.at(2, 0), B.at(2, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 3> const & A, Matrix<Element, 3, 1> const & B,
Matrix<Element, 1, 3> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), A.at(1, 2), B.at(1, 0)
, A.at(2, 0), A.at(2, 1), A.at(2, 2), B.at(2, 0)
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D
);
}
/// Elementwise add operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
result.data[8] = data[8] + rhs.data[8];
result.data[9] = data[9] + rhs.data[9];
result.data[10] = data[10] + rhs.data[10];
result.data[11] = data[11] + rhs.data[11];
result.data[12] = data[12] + rhs.data[12];
result.data[13] = data[13] + rhs.data[13];
result.data[14] = data[14] + rhs.data[14];
result.data[15] = data[15] + rhs.data[15];
return result;
}
/// Elementwise add operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
data[8] += rhs.data[8];
data[9] += rhs.data[9];
data[10] += rhs.data[10];
data[11] += rhs.data[11];
data[12] += rhs.data[12];
data[13] += rhs.data[13];
data[14] += rhs.data[14];
data[15] += rhs.data[15];
return *this;
}
/// Elementwise subtract operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
result.data[8] = data[8] - rhs.data[8];
result.data[9] = data[9] - rhs.data[9];
result.data[10] = data[10] - rhs.data[10];
result.data[11] = data[11] - rhs.data[11];
result.data[12] = data[12] - rhs.data[12];
result.data[13] = data[13] - rhs.data[13];
result.data[14] = data[14] - rhs.data[14];
result.data[15] = data[15] - rhs.data[15];
return result;
}
/// Elementwise subtract operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
data[8] -= rhs.data[8];
data[9] -= rhs.data[9];
data[10] -= rhs.data[10];
data[11] -= rhs.data[11];
data[12] -= rhs.data[12];
data[13] -= rhs.data[13];
data[14] -= rhs.data[14];
data[15] -= rhs.data[15];
return *this;
}
/// Elementwise multiply operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
result.data[8] = data[8] * rhs.data[8];
result.data[9] = data[9] * rhs.data[9];
result.data[10] = data[10] * rhs.data[10];
result.data[11] = data[11] * rhs.data[11];
result.data[12] = data[12] * rhs.data[12];
result.data[13] = data[13] * rhs.data[13];
result.data[14] = data[14] * rhs.data[14];
result.data[15] = data[15] * rhs.data[15];
return result;
}
/// Scalar multiply operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
result.data[8] = data[8] * s;
result.data[9] = data[9] * s;
result.data[10] = data[10] * s;
result.data[11] = data[11] * s;
result.data[12] = data[12] * s;
result.data[13] = data[13] * s;
result.data[14] = data[14] * s;
result.data[15] = data[15] * s;
return result;
}
/// Scalar multiply operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
data[8] *= s;
data[9] *= s;
data[10] *= s;
data[11] *= s;
data[12] *= s;
data[13] *= s;
data[14] *= s;
data[15] *= s;
return *this;
}
/// Elementwise divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
result.data[8] = data[8] / rhs.data[8];
result.data[9] = data[9] / rhs.data[9];
result.data[10] = data[10] / rhs.data[10];
result.data[11] = data[11] / rhs.data[11];
result.data[12] = data[12] / rhs.data[12];
result.data[13] = data[13] / rhs.data[13];
result.data[14] = data[14] / rhs.data[14];
result.data[15] = data[15] / rhs.data[15];
return result;
}
/// Scalar divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
result.data[8] = data[8] / s;
result.data[9] = data[9] / s;
result.data[10] = data[10] / s;
result.data[11] = data[11] / s;
result.data[12] = data[12] / s;
result.data[13] = data[13] / s;
result.data[14] = data[14] / s;
result.data[15] = data[15] / s;
return result;
}
/// Scalar divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
data[8] /= s;
data[9] /= s;
data[10] /= s;
data[11] /= s;
data[12] /= s;
data[13] /= s;
data[14] /= s;
data[15] /= s;
return *this;
}
/// Elementwise divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
data[8] /= rhs.data[8];
data[9] /= rhs.data[9];
data[10] /= rhs.data[10];
data[11] /= rhs.data[11];
data[12] /= rhs.data[12];
data[13] /= rhs.data[13];
data[14] /= rhs.data[14];
data[15] /= rhs.data[15];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
    m.data[0] = -data[0];
    m.data[1] = -data[1];
    m.data[2] = -data[2];
    m.data[3] = -data[3];
    m.data[4] = -data[4];
    m.data[5] = -data[5];
    m.data[6] = -data[6];
    m.data[7] = -data[7];
    m.data[8] = -data[8];
    m.data[9] = -data[9];
    m.data[10] = -data[10];
    m.data[11] = -data[11];
    m.data[12] = -data[12];
    m.data[13] = -data[13];
    m.data[14] = -data[14];
    m.data[15] = -data[15];
return m;
}
/// Matrix product of size 4-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> product(
Matrix<Element, 4, 1> const &rhs,
Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[4] * rhs.data[0];
accum.data[2] += data[8] * rhs.data[0];
accum.data[3] += data[12] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[5] * rhs.data[1];
accum.data[2] += data[9] * rhs.data[1];
accum.data[3] += data[13] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[6] * rhs.data[2];
accum.data[2] += data[10] * rhs.data[2];
accum.data[3] += data[14] * rhs.data[2];
// k=3
accum.data[0] += data[3] * rhs.data[3];
accum.data[1] += data[7] * rhs.data[3];
accum.data[2] += data[11] * rhs.data[3];
accum.data[3] += data[15] * rhs.data[3];
return accum;
}
/// Matrix product of size 4-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> operator*(Matrix<Element, 4, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> product(
Matrix<Element, 4, 2> const &rhs,
Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[4] * rhs.data[0];
accum.data[3] += data[4] * rhs.data[1];
accum.data[4] += data[8] * rhs.data[0];
accum.data[5] += data[8] * rhs.data[1];
accum.data[6] += data[12] * rhs.data[0];
accum.data[7] += data[12] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[5] * rhs.data[2];
accum.data[3] += data[5] * rhs.data[3];
accum.data[4] += data[9] * rhs.data[2];
accum.data[5] += data[9] * rhs.data[3];
accum.data[6] += data[13] * rhs.data[2];
accum.data[7] += data[13] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[6] * rhs.data[4];
accum.data[3] += data[6] * rhs.data[5];
accum.data[4] += data[10] * rhs.data[4];
accum.data[5] += data[10] * rhs.data[5];
accum.data[6] += data[14] * rhs.data[4];
accum.data[7] += data[14] * rhs.data[5];
// k=3
accum.data[0] += data[3] * rhs.data[6];
accum.data[1] += data[3] * rhs.data[7];
accum.data[2] += data[7] * rhs.data[6];
accum.data[3] += data[7] * rhs.data[7];
accum.data[4] += data[11] * rhs.data[6];
accum.data[5] += data[11] * rhs.data[7];
accum.data[6] += data[15] * rhs.data[6];
accum.data[7] += data[15] * rhs.data[7];
return accum;
}
/// Matrix product of size 4-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> operator*(Matrix<Element, 4, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> product(
Matrix<Element, 4, 3> const &rhs,
Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[0];
accum.data[4] += data[4] * rhs.data[1];
accum.data[5] += data[4] * rhs.data[2];
accum.data[6] += data[8] * rhs.data[0];
accum.data[7] += data[8] * rhs.data[1];
accum.data[8] += data[8] * rhs.data[2];
accum.data[9] += data[12] * rhs.data[0];
accum.data[10] += data[12] * rhs.data[1];
accum.data[11] += data[12] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[5] * rhs.data[3];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
accum.data[6] += data[9] * rhs.data[3];
accum.data[7] += data[9] * rhs.data[4];
accum.data[8] += data[9] * rhs.data[5];
accum.data[9] += data[13] * rhs.data[3];
accum.data[10] += data[13] * rhs.data[4];
accum.data[11] += data[13] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[6] * rhs.data[6];
accum.data[4] += data[6] * rhs.data[7];
accum.data[5] += data[6] * rhs.data[8];
accum.data[6] += data[10] * rhs.data[6];
accum.data[7] += data[10] * rhs.data[7];
accum.data[8] += data[10] * rhs.data[8];
accum.data[9] += data[14] * rhs.data[6];
accum.data[10] += data[14] * rhs.data[7];
accum.data[11] += data[14] * rhs.data[8];
// k=3
accum.data[0] += data[3] * rhs.data[9];
accum.data[1] += data[3] * rhs.data[10];
accum.data[2] += data[3] * rhs.data[11];
accum.data[3] += data[7] * rhs.data[9];
accum.data[4] += data[7] * rhs.data[10];
accum.data[5] += data[7] * rhs.data[11];
accum.data[6] += data[11] * rhs.data[9];
accum.data[7] += data[11] * rhs.data[10];
accum.data[8] += data[11] * rhs.data[11];
accum.data[9] += data[15] * rhs.data[9];
accum.data[10] += data[15] * rhs.data[10];
accum.data[11] += data[15] * rhs.data[11];
return accum;
}
/// Matrix product of size 4-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> operator*(Matrix<Element, 4, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> product(
Matrix<Element, 4, 4> const &rhs,
Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[0];
accum.data[5] += data[4] * rhs.data[1];
accum.data[6] += data[4] * rhs.data[2];
accum.data[7] += data[4] * rhs.data[3];
accum.data[8] += data[8] * rhs.data[0];
accum.data[9] += data[8] * rhs.data[1];
accum.data[10] += data[8] * rhs.data[2];
accum.data[11] += data[8] * rhs.data[3];
accum.data[12] += data[12] * rhs.data[0];
accum.data[13] += data[12] * rhs.data[1];
accum.data[14] += data[12] * rhs.data[2];
accum.data[15] += data[12] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
accum.data[6] += data[5] * rhs.data[6];
accum.data[7] += data[5] * rhs.data[7];
accum.data[8] += data[9] * rhs.data[4];
accum.data[9] += data[9] * rhs.data[5];
accum.data[10] += data[9] * rhs.data[6];
accum.data[11] += data[9] * rhs.data[7];
accum.data[12] += data[13] * rhs.data[4];
accum.data[13] += data[13] * rhs.data[5];
accum.data[14] += data[13] * rhs.data[6];
accum.data[15] += data[13] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[6] * rhs.data[8];
accum.data[5] += data[6] * rhs.data[9];
accum.data[6] += data[6] * rhs.data[10];
accum.data[7] += data[6] * rhs.data[11];
accum.data[8] += data[10] * rhs.data[8];
accum.data[9] += data[10] * rhs.data[9];
accum.data[10] += data[10] * rhs.data[10];
accum.data[11] += data[10] * rhs.data[11];
accum.data[12] += data[14] * rhs.data[8];
accum.data[13] += data[14] * rhs.data[9];
accum.data[14] += data[14] * rhs.data[10];
accum.data[15] += data[14] * rhs.data[11];
// k=3
accum.data[0] += data[3] * rhs.data[12];
accum.data[1] += data[3] * rhs.data[13];
accum.data[2] += data[3] * rhs.data[14];
accum.data[3] += data[3] * rhs.data[15];
accum.data[4] += data[7] * rhs.data[12];
accum.data[5] += data[7] * rhs.data[13];
accum.data[6] += data[7] * rhs.data[14];
accum.data[7] += data[7] * rhs.data[15];
accum.data[8] += data[11] * rhs.data[12];
accum.data[9] += data[11] * rhs.data[13];
accum.data[10] += data[11] * rhs.data[14];
accum.data[11] += data[11] * rhs.data[15];
accum.data[12] += data[15] * rhs.data[12];
accum.data[13] += data[15] * rhs.data[13];
accum.data[14] += data[15] * rhs.data[14];
accum.data[15] += data[15] * rhs.data[15];
return accum;
}
/// Matrix product of size 4-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> operator*(Matrix<Element, 4, 4> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) {
*this = product(rhs);
return *this;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
accum += data[8];
accum += data[9];
accum += data[10];
accum += data[11];
accum += data[12];
accum += data[13];
accum += data[14];
accum += data[15];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
accum += data[8] * data[8];
accum += data[9] * data[9];
accum += data[10] * data[10];
accum += data[11] * data[11];
accum += data[12] * data[12];
accum += data[13] * data[13];
accum += data[14] * data[14];
accum += data[15] * data[15];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[5];
accum += data[10];
accum += data[15];
return accum;
}
/// Returns 4-by-4 rotation matrix around the X axis
CUTLASS_HOST_DEVICE
static Matrix rotation_X(Element theta) {
Matrix m = identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(1, 1) = c;
m.at(1, 2) = -s;
m.at(2, 1) = s;
m.at(2, 2) = c;
return m;
}
/// Returns 4-by-4 rotation matrix around the Y axis
CUTLASS_HOST_DEVICE
static Matrix rotation_Y(Element theta) {
Matrix m = identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(0, 0) = c;
m.at(2, 0) = -s;
m.at(0, 2) = s;
m.at(2, 2) = c;
return m;
}
/// Returns 4-by-4 rotation matrix around the Z axis
CUTLASS_HOST_DEVICE
static Matrix rotation_Z(Element theta) {
Matrix m = Matrix::identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(0, 0) = c;
m.at(0, 1) = -s;
m.at(1, 0) = s;
m.at(1, 1) = c;
return m;
}
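  // Sketch of composing the rotation helpers (illustrative only; the float element type and
  // angle values are assumptions). With column vectors, the right-most factor is applied
  // first:
  //
  //   Matrix<float, 4, 4> R = Matrix<float, 4, 4>::rotation_Z(0.50f) *
  //                           Matrix<float, 4, 4>::rotation_X(0.25f);
  //   Matrix<float, 4, 1> p(1.0f, 0.0f, 0.0f, 1.0f);   // homogeneous point
  //   Matrix<float, 4, 1> q = R * p;                    // rotate about X, then about Z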
/// Returns a 4-by-4 rotation matrix around a unit-length axis
CUTLASS_HOST_DEVICE
static Matrix rotation(Element theta, Matrix<Element, 3, 1> const &u) {
Element x = u.data[0];
Element y = u.data[1];
Element z = u.data[2];
Element c = fast_cos(theta);
Element s = fast_sin(theta);
Element one_minus_cos = Element(1) - fast_cos(theta);
    Matrix m = Matrix::identity();
    m.set_slice_3x3({
c + x * x * one_minus_cos, x * y * one_minus_cos - z * s, x * z * one_minus_cos + y * s,
      y * x * one_minus_cos + z * s, c + y * y * one_minus_cos, y * z * one_minus_cos - x * s,
z * x * one_minus_cos - y * s, z * y * one_minus_cos + x * s, c + z * z * one_minus_cos
});
return m;
}
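  // The slice written above is Rodrigues' rotation formula for a unit axis u = (x, y, z):
  //
  //   R = c * I + s * [u]_x + (1 - c) * u * u^T,   with c = cos(theta), s = sin(theta),
  //
  // embedded as the upper-left 3-by-3 block of the homogeneous 4-by-4 matrix. The caller is
  // responsible for passing a unit-length axis.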
/// Returns a 4-by-4 reflection about the plane specified by the
/// unit-length normal vector n_unit
CUTLASS_HOST_DEVICE
static Matrix reflection(Matrix<Element, 3, 1> const &n_unit) {
Element a = n_unit.data[0];
Element b = n_unit.data[1];
Element c = n_unit.data[2];
Matrix m = Matrix::identity();
    m.set_slice_3x3({
Element(1) - Element(2) * a * a, Element(-2) * a * b, Element(-2) * a * c,
Element(-2) * a * b, Element(1) - Element(2) * b * b, Element(-2) * b * c,
Element(-2) * a * c, Element(-2) * b * c, Element(1) - Element(2) * c * c
});
return m;
}
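  // The 3-by-3 slice above is the Householder reflection R = I - 2 * n * n^T for the
  // unit-length plane normal n = (a, b, c); the remaining homogeneous row and column are
  // taken from the identity matrix.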
/// Returns a perspective projection matrix typical of OpenGL applications
CUTLASS_HOST_DEVICE
static Matrix perspective(Element near_plane, Element far_plane, Element fovH, Element fovV) {
Element aspect = fovH / fovV;
Element f = Element(cos(fovV)) / Element(fovH);
Element Q = near_plane - far_plane;
return Matrix(
f / aspect, 0, 0, 0,
0, f, 0, 0,
0, 0, (near_plane + far_plane) / Q, Element(2) * far_plane * near_plane / Q,
0, 0, -1, 0
);
}
  /// Returns a 4-by-4 homogeneous translation matrix for a 3-by-1 offset vector
  CUTLASS_HOST_DEVICE
static Matrix translation(Matrix<Element, 3, 1> const &v) {
return Matrix(
1, 0, 0, v.data[0],
0, 1, 0, v.data[1],
0, 0, 1, v.data[2],
0, 0, 0, 1
);
}
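  // Sketch of applying the homogeneous translation to a point (illustrative only; the float
  // element type and the values below are assumptions):
  //
  //   Matrix<float, 3, 1> offset(1.0f, 2.0f, 3.0f);
  //   Matrix<float, 4, 4> T = Matrix<float, 4, 4>::translation(offset);
  //   Matrix<float, 4, 1> p(0.0f, 0.0f, 0.0f, 1.0f);   // homogeneous point with w = 1
  //   Matrix<float, 4, 1> q = T * p;                    // q holds (1, 2, 3, 1)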
/// Computes the determinant of a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Element determinant(Element accum = Element()) const {
accum += at(0, 0) * Matrix<Element, 3, 3>({ at(1, 1), at(1, 2), at(1, 3), at(2, 1), at(2, 2), at(2, 3), at(3, 1), at(3, 2), at(3, 3) }).determinant();
accum -= at(0, 1) * Matrix<Element, 3, 3>({ at(1, 0), at(1, 2), at(1, 3), at(2, 0), at(2, 2), at(2, 3), at(3, 0), at(3, 2), at(3, 3) }).determinant();
accum += at(0, 2) * Matrix<Element, 3, 3>({ at(1, 0), at(1, 1), at(1, 3), at(2, 0), at(2, 1), at(2, 3), at(3, 0), at(3, 1), at(3, 3) }).determinant();
accum -= at(0, 3) * Matrix<Element, 3, 3>({ at(1, 0), at(1, 1), at(1, 2), at(2, 0), at(2, 1), at(2, 2), at(3, 0), at(3, 1), at(3, 2) }).determinant();
return accum;
}
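  // The expansion above is a Laplace (cofactor) expansion along the first row:
  //
  //   det(M) = sum_j (-1)^j * M(0, j) * det(minor(0, j))
  //
  // where minor(0, j) is the 3-by-3 matrix obtained by deleting row 0 and column j of M.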
/// Computes the inverse of a 4-by-4 matrix (ignores the optional argument)
CUTLASS_HOST_DEVICE
Matrix inverse(Element ignore = 1) const {
Matrix<Element, 2, 2> B = slice_2x2(0, 2);
Matrix<Element, 2, 2> A = slice_2x2(0, 0);
Matrix<Element, 2, 2> C = slice_2x2(2, 0);
Matrix<Element, 2, 2> D = slice_2x2(2, 2);
Matrix<Element, 2, 2> D_inv = D.inverse();
Matrix<Element, 2, 2> E = (A - B * D_inv * C).inverse();
return Matrix::block(
E, -E * B * D_inv,
-D_inv * C * E, D_inv + D_inv * C * E * B * D_inv
);
}
};
/// Template alias for 4-by-4 matrix
template <typename Element>
using Matrix4x4 = Matrix<Element, 4, 4>;
/// Free function to infer the element type from its arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix4x4<Element> make_Matrix4x4(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3,
Element _2_0, Element _2_1, Element _2_2, Element _2_3,
Element _3_0, Element _3_1, Element _3_2, Element _3_3
) {
return Matrix4x4<Element>(
_0_0, _0_1, _0_2, _0_3,
_1_0, _1_1, _1_2, _1_3,
_2_0, _2_1, _2_2, _2_3,
_3_0, _3_1, _3_2, _3_3
);
}
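// Illustrative usage sketch (comments only; not part of the original header):
//
//   cutlass::Matrix4x4<float> R = cutlass::Matrix4x4<float>::rotation_Z(0.5f);
//   float det = R.determinant();                    // expected to be ~1 for a pure rotation
//   cutlass::Matrix4x4<float> R_inv = R.inverse();  // blockwise 2x2 (Schur complement) inverse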
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Elementwise scalar multiplication
template <typename Element, int Rows, int Columns>
CUTLASS_HOST_DEVICE
Matrix<Element, Rows, Columns> operator*(Element s, Matrix<Element, Rows, Columns> const &rhs) {
return rhs.multiply(s);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/matrix.h/0 | {
"file_path": "include/cutlass/matrix.h",
"repo_id": "include",
"token_count": 169298
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/kernel/tensor_reduce_affine_strided.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tensor reduction operator on layouts which are affine
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput_,
typename ElementSource_,
typename ReductionOp_,
int VectorLength = 1,
typename ElementCompute_ = ElementOutput_,
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
struct TensorReductionAffineStrided {
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ElementOutput = ElementOutput_;
using ElementSource = ElementSource_;
using ReductionOp = ReductionOp_;
using ElementCompute = ElementCompute_;
//
// Data members
//
/// Internal status field
Status status;
/// Extent of tensor in source layout
Coord<kRank> extent;
/// Number of points in the outer index space
int64_t outer_count;
/// Number of elements in the inner index space
int64_t inner_count;
/// Number of workspaces needed
int workspace_count;
/// CUDA Grid shape (.x => contiguous, .y => outer, .z => inner)
dim3 grid_shape;
/// CUDA Threadblock shape (.x => contiguous, .y => outer, .z => inner)
dim3 threadblock_shape;
/// CUDA grid shape for the final reduction step if needed
dim3 grid_final;
/// CUDA threadblock shape for the final reduction step if needed
dim3 threadblock_final;
private:
//
// Methods
//
/// Helper to reshape 'count' such that it is less than 2 x 'ext'
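/// e.g., reshape_pow2(10, 64) returns 4, since 64 / 4 == 16 < 2 * 10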
static int reshape_pow2(int ext, int count) {
if (ext > count) {
return 1;
}
int x = 1;
for (; count >= ext * 2; ) {
count >>= 1;
x <<= 1;
}
return x;
}
public:
/// Default ctor
TensorReductionAffineStrided():
status(Status::kErrorInvalidProblem),
extent(),
outer_count(0),
inner_count(0),
workspace_count(0),
grid_shape(0, 0, 0),
threadblock_shape(0, 0, 0) { }
/// Constructor
TensorReductionAffineStrided(
Coord<kRank> extent_,
int target_threadblock_count = 128
):
status(Status::kSuccess),
extent(extent_),
outer_count(0),
inner_count(0),
workspace_count(0) {
//
// Plan the parallel mapping strategy.
//
outer_count = 1;
inner_count = 1;
// Compute number of elements in strided ranks
for (int p = 0; p < kReducedRank - 1; ++p) {
outer_count *= extent[p];
}
for (int p = 0; p < kInnerRank; ++p) {
inner_count *= extent[kReducedRank + p - 1];
}
// Compute plan for the reduction
int extent_c = extent[kRank - 1];
int vectors_c = (extent_c -1 + kVectorLength) / kVectorLength;
// Determine CTA shape
int cta_width = kThreads * kVectorLength;
int cta_ways = reshape_pow2(extent_c, cta_width);
int cta_threads_x = kThreads / cta_ways;
threadblock_shape = dim3(cta_threads_x, 1, std::min(cta_ways, 64));
// A CTA may not be partitioned over both the outer (y) and inner (z) dimensions at once.
if (threadblock_shape.z > 1) {
if (threadblock_shape.y != 1) {
status = Status::kErrorInternal;
return;
}
}
// Determine grid shape
int cta_count_x = (vectors_c + cta_threads_x - 1) / cta_threads_x;
int cta_count_y = std::max(1, target_threadblock_count / cta_count_x);
// Limit the number of CTAs assigned to outer dimension
if (int64_t(cta_count_y * threadblock_shape.y) > outer_count) {
cta_count_y = int(outer_count + threadblock_shape.y - 1) / threadblock_shape.y;
}
// Limit the number of CTAs assigned to inner dimension
int cta_count_z = std::max(1, target_threadblock_count / cta_count_y);
if (int64_t(cta_count_z * threadblock_shape.z) > inner_count) {
cta_count_z = int(inner_count + threadblock_shape.z - 1) / threadblock_shape.z;
}
grid_shape = dim3(cta_count_x, cta_count_y, cta_count_z);
workspace_count = (cta_count_z > 1 ? cta_count_z : 0);
// Determine shape of final reduction kernel if needed
grid_final = dim3(cta_count_x, int(outer_count));
threadblock_final = dim3(cta_threads_x, 1, 1);
}
/// Simple check to verify the object is initialized correctly
bool good() const {
return status == Status::kSuccess;
}
/// Size of one CTA's workspace
int64_t workspace_stride() const {
// Error condition
if (!good()) {
return 0;
}
int vector_size_bytes = kVectorLength * sizeof_bits<ElementCompute>::value / 8;
return extent[kRank - 1] * vector_size_bytes;
}
/// Returns the size (in bytes) of a temporary workspace needed for reduction across CTAs
int64_t workspace_size() const {
// Error condition
if (!good()) {
return 0;
}
// No reduction across CTAs
if (grid_shape.z == 1) {
return 0;
}
return workspace_stride() * outer_count * grid_shape.z;
}
/// Performs a reduction
Status reduce(
ElementOutput *dst_ptr, ///< Pointer to destination tensor
int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1)
ElementSource const *src_ptr, ///< Pointer to source tensor
int64_t src_stride[], ///< Stride vector (of length kRank - 1)
void *device_workspace_ptr = nullptr, ///< Device workspace
ElementCompute reduction_identity = ElementCompute(), ///< Reduction identity
ReductionOp reduction_op = ReductionOp(), ///< Reduction operator
cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched
// Initial status check
if (!good()) {
return status;
}
// Guard against null workspace
if (workspace_count > 1 && device_workspace_ptr == nullptr) {
return Status::kErrorWorkspaceNull;
}
// Define reduction kernel
using ReductionKernel = kernel::TensorReductionAffineStrided<
kRank,
kReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
kVectorLength,
ElementCompute,
kThreads>;
using FinalReductionKernel = kernel::TensorReductionAffineStridedFinal<
kRank,
kReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
kVectorLength,
ElementCompute,
kThreads>;
using Params = typename ReductionKernel::Params;
// Construct the parameters
Params params(
extent,
dst_ptr,
dst_stride,
src_ptr,
src_stride,
static_cast<ElementCompute *>(device_workspace_ptr),
workspace_stride(),
workspace_count,
reduction_op,
reduction_identity);
// Shared memory size
int shared_mem_bytes = sizeof(typename ReductionKernel::SharedStorage);
// Launch the kernel
Kernel<ReductionKernel><<< grid_shape, threadblock_shape, shared_mem_bytes, stream >>>(params);
// Check error condition
if (cudaPeekAtLastError() == cudaSuccess) {
status = Status::kSuccess;
}
else {
status = Status::kErrorInternal;
}
// Final reduction kernel
if (workspace_count) {
Kernel<FinalReductionKernel><<< grid_final, threadblock_final, 0, stream >>>(params);
// Check error condition
if (cudaPeekAtLastError() == cudaSuccess) {
status = Status::kSuccess;
}
else {
status = Status::kErrorInternal;
}
}
return status;
}
/// Helper to use overloaded function call operator
Status operator()(
ElementOutput *dst_ptr, ///< Pointer to destination tensor
int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1)
ElementSource const *src_ptr, ///< Pointer to source tensor
int64_t src_stride[], ///< Stride vector (of length kRank - 1)
void *device_workspace_ptr = nullptr, ///< Pointer to device workspace
ElementCompute reduction_identity = ElementCompute(), ///< Reduction identity
ReductionOp reduction_op = ReductionOp(), ///< Reduction operator
cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched
return reduce(
dst_ptr,
dst_stride,
src_ptr,
src_stride,
device_workspace_ptr,
reduction_identity,
reduction_op,
stream);
}
};
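// Illustrative usage sketch (comments only; not part of the original header). Identifiers such
// as 'extent', 'dst_ptr', 'src_ptr', and 'device_workspace' are placeholders:
//
//   using Reduction = cutlass::reduction::device::TensorReductionAffineStrided<
//       4,                          // rank of the source tensor (e.g. NHWC)
//       3,                          // rank of the reduced tensor (reduce one strided rank)
//       float, float,
//       cutlass::plus<float>>;
//
//   Reduction reduction(extent);    // extent is a cutlass::Coord<4>
//   // dst_stride has (ReducedRank - 1) entries, src_stride has (Rank - 1) entries, and
//   // device_workspace points to at least reduction.workspace_size() bytes of device memory.
//   cutlass::Status status = reduction.reduce(
//       dst_ptr, dst_stride, src_ptr, src_stride, device_workspace, 0.0f);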
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/reduction/device/tensor_reduce_affine_strided.h/0 | {
"file_path": "include/cutlass/reduction/device/tensor_reduce_affine_strided.h",
"repo_id": "include",
"token_count": 4123
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a proxy class for storing Tensor Float 32 data type.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
#else
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
#include "cutlass/cutlass.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Tensor Float 32 data type
struct alignas(4) tfloat32_t {
//
// Data members
//
/// Storage type
uint32_t storage;
//
// Methods
//
/// Constructs from an unsigned int
CUTLASS_HOST_DEVICE
static tfloat32_t bitcast(uint32_t x) {
tfloat32_t h;
h.storage = x;
return h;
}
/// Emulated rounding is fast in device code
CUTLASS_HOST_DEVICE
static tfloat32_t round_half_ulp_truncate(float const &s) {
uint32_t x = reinterpret_cast<uint32_t const &>(s);
#if defined(__CUDA_ARCH__)
if (::isfinite(s)) {
x += 0x1000u;
}
#else
if (std::isfinite(s)) {
x += 0x1000u;
}
#endif
return tfloat32_t::bitcast(x);
}
/// Default constructor
tfloat32_t() = default;
/// Floating-point conversion - round toward nearest even
CUTLASS_HOST_DEVICE
// explicit tfloat32_t(float x): storage(round_half_ulp_truncate(x).storage) { }
tfloat32_t(float x): storage(round_half_ulp_truncate(x).storage) { }
/// Floating-point conversion - round toward nearest even
CUTLASS_HOST_DEVICE
// explicit tfloat32_t(double x): tfloat32_t(float(x)) {
tfloat32_t(double x): tfloat32_t(float(x)) {
}
/// Integer conversion - round toward zero
CUTLASS_HOST_DEVICE
// explicit tfloat32_t(int x) {
tfloat32_t(int x) {
float flt = static_cast<float>(x);
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&storage, &flt, sizeof(storage));
#endif
}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
// Conversion to IEEE single-precision requires clearing the don't-care bits
// of the mantissa.
unsigned bits = (storage & ~0x1fffu);
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const &>(bits);
#else
float flt;
std::memcpy(&flt, &bits, sizeof(flt));
return flt;
#endif
}
/// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(float(*this));
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
return (float(*this) != 0.0f);
}
/// Obtains raw bits
CUTLASS_HOST_DEVICE
uint32_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((raw() & 0x80000000) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((raw() >> 23) & 0x0ff);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - 127;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(raw() & 0x7fffff);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool signbit(cutlass::tfloat32_t const& h) {
return h.signbit();
}
CUTLASS_HOST_DEVICE
cutlass::tfloat32_t abs(cutlass::tfloat32_t const& h) {
return cutlass::tfloat32_t::bitcast(h.raw() & 0x7fffffff);
}
CUTLASS_HOST_DEVICE
bool isnan(cutlass::tfloat32_t const& h) {
return (h.exponent_biased() == 0x0ff) && h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isfinite(cutlass::tfloat32_t const& h) {
return (h.exponent_biased() != 0x0ff);
}
CUTLASS_HOST_DEVICE
cutlass::tfloat32_t nan_tf32(const char*) {
// NVIDIA canonical NaN
return cutlass::tfloat32_t::bitcast(0x7fffffff);
}
CUTLASS_HOST_DEVICE
bool isinf(cutlass::tfloat32_t const& h) {
return (h.exponent_biased() == 0x0ff) && !h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isnormal(cutlass::tfloat32_t const& h) {
return h.exponent_biased() && h.exponent_biased() != 0x0ff;
}
CUTLASS_HOST_DEVICE
int fpclassify(cutlass::tfloat32_t const& h) {
int exp = h.exponent_biased();
int mantissa = h.mantissa();
if (exp == 0x0ff) {
if (mantissa) {
return FP_NAN;
}
else {
return FP_INFINITE;
}
}
else if (!exp) {
if (mantissa) {
return FP_SUBNORMAL;
}
else {
return FP_ZERO;
}
}
return FP_NORMAL;
}
CUTLASS_HOST_DEVICE
cutlass::tfloat32_t sqrt(cutlass::tfloat32_t const& h) {
#if defined(__CUDACC_RTC__)
return cutlass::tfloat32_t(sqrtf(float(h)));
#else
return cutlass::tfloat32_t(std::sqrt(float(h)));
#endif
}
CUTLASS_HOST_DEVICE
tfloat32_t copysign(tfloat32_t const& a, tfloat32_t const& b) {
uint32_t a_mag = (reinterpret_cast<uint32_t const &>(a) & 0x7fffffff);
uint32_t b_sign = (reinterpret_cast<uint32_t const &>(b) & 0x80000000);
uint32_t result = (a_mag | b_sign);
return reinterpret_cast<tfloat32_t const &>(result);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace std {
#if !defined(__CUDACC_RTC__)
/// Numeric limits
template <>
struct numeric_limits<cutlass::tfloat32_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 19;
/// Least positive value
static cutlass::tfloat32_t min() { return cutlass::tfloat32_t::bitcast(0x01); }
/// Minimum finite value
static cutlass::tfloat32_t lowest() { return cutlass::tfloat32_t::bitcast(0xff7fffff); }
/// Maximum finite value
static cutlass::tfloat32_t max() { return cutlass::tfloat32_t::bitcast(0x7f7fffff); }
/// Returns the machine epsilon: the difference between 1 and the next representable value
static cutlass::tfloat32_t epsilon() { return cutlass::tfloat32_t::bitcast(0x1000); }
/// Returns the maximum rounding error
static cutlass::tfloat32_t round_error() { return cutlass::tfloat32_t(0.5f); }
/// Returns positive infinity
static cutlass::tfloat32_t infinity() { return cutlass::tfloat32_t::bitcast(0x7f800000); }
/// Returns a quiet NaN
static cutlass::tfloat32_t quiet_NaN() { return cutlass::tfloat32_t::bitcast(0x7fffffff); }
/// Returns a signaling NaN
static cutlass::tfloat32_t signaling_NaN() { return cutlass::tfloat32_t::bitcast(0x7fffffff); }
/// Returns the smallest positive subnormal value
static cutlass::tfloat32_t denorm_min() { return cutlass::tfloat32_t::bitcast(0x1); }
};
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace std
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
tfloat32_t operator+(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return tfloat32_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
tfloat32_t operator-(tfloat32_t const& lhs) {
union u_tff32 {
float val_f32;
tfloat32_t val_tf;
CUTLASS_HOST_DEVICE u_tff32() : val_f32(0) { }
};
union u_tff32 x; x.val_f32 = -reinterpret_cast<float const &>(lhs);
return x.val_tf;
}
CUTLASS_HOST_DEVICE
tfloat32_t operator-(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return tfloat32_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
tfloat32_t operator*(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return tfloat32_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
tfloat32_t operator/(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return tfloat32_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator+=(tfloat32_t & lhs, tfloat32_t const& rhs) {
lhs = tfloat32_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator-=(tfloat32_t & lhs, tfloat32_t const& rhs) {
lhs = tfloat32_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator*=(tfloat32_t & lhs, tfloat32_t const& rhs) {
lhs = tfloat32_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator/=(tfloat32_t & lhs, tfloat32_t const& rhs) {
lhs = tfloat32_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator++(tfloat32_t & lhs) {
float tmp(lhs);
++tmp;
lhs = tfloat32_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator--(tfloat32_t & lhs) {
float tmp(lhs);
--tmp;
lhs = tfloat32_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t operator++(tfloat32_t & lhs, int) {
tfloat32_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = tfloat32_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
tfloat32_t operator--(tfloat32_t & lhs, int) {
tfloat32_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = tfloat32_t(tmp);
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::tfloat32_t operator "" _tf32(long double x) {
return cutlass::tfloat32_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::tfloat32_t operator "" _tf32(unsigned long long int x) {
return cutlass::tfloat32_t(int(x));
}
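// Illustrative usage sketch (comments only; not part of the original header):
//
//   cutlass::tfloat32_t x = 1.5_tf32;        // user-defined literal
//   cutlass::tfloat32_t y(0.25f);            // constructed from float via round_half_ulp_truncate
//   float z = float(x * y + y);              // arithmetic is carried out through float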
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/tfloat32.h/0 | {
"file_path": "include/cutlass/tfloat32.h",
"repo_id": "include",
"token_count": 4661
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of a warp vector
that participate in one warp-level mma operation.
Typically, this is used to access the scale/bias fragment of a warp-level mma operation.
The scale/bias vector is then partitioned into smaller fragments that can be fed into
the next warp-level mma operation.
This iterator is necessary to accomplish warp-level mma fusion where the scale/bias vector is
applied to the multiplicand for the next mma.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
namespace cutlass {
namespace transform {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the input fragment tile shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator;
// Partial specialization for PitchLinear layout tile
template <
/// Size of the input fragment vector shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator<Shape_, Element_,
cutlass::layout::PitchLinear,
InstructionShape_, ElementsPerAccess> {
public:
/// Size of the input threadblock tile shape (concept: MatrixShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::PitchLinear;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Number of participating threads
static int const kThreads = 32;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kRowsPerIteration = 8;
static int const kColumnsPerAccess = 8;
static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kK / kThreads;
static int const kAccessPerIteration = kElementsPerIteration / kElementsPerAccess;
/// Number of iterations
using Iterations = MatrixShape<InstructionShape::kM / kRowsPerIteration, Shape::kContiguous / kElementsPerIteration>;
public:
//
// Derived quantities
//
// All fragments have kElementsPerAccess scale followed by bias
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one iteration of the iterator.
using Fragment = Array<Element, kElementsPerIteration * Iterations::kRow>;
/// Input threadblock fragment tile
using ThreadblockFragment = Array<Element, Shape::kContiguous >;
private:
/// Internal access type
using AccessType = Array<Element, kElementsPerAccess>;
private:
//
// Data members
//
/// Input threadblock fragment tile
AccessType const *iterator_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
VectorFragmentIterator(ThreadblockFragment const &threadblock_frag)
: iterator_(reinterpret_cast<AccessType const *>(&threadblock_frag)),
index_(0) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(index_ >= Iterations::kColumn)
index_ = 0;
}
/// Increments
CUTLASS_HOST_DEVICE
VectorFragmentIterator &operator++() {
add_offset(1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
index_ = idx;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int r = 0; r < Iterations::kRow; r++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kAccessPerIteration; i++) {
frag_ptr[i * Iterations::kRow + r].clear();
frag_ptr[i * Iterations::kRow + r] = iterator_[index_ * kAccessPerIteration + i];
}
}
}
};
// Partial specialization for Row-Major layout tile
template <
/// Size of the input fragment tile shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator<Shape_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, ElementsPerAccess> {
public:
/// Size of the input threadblock tile shape (concept: MatrixShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Underlying iterator
using Base = VectorFragmentIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, InstructionShape, ElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one iteration of the iterator.
using Fragment = typename Base::Fragment;
/// Input threadblock fragment tile
using ThreadblockFragment = typename Base::ThreadblockFragment;
private:
/// Underlying iterator
Base iterator_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
VectorFragmentIterator(ThreadblockFragment const &threadblock_frag)
: iterator_(threadblock_frag) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
iterator_.add_offset(index_offset);
}
/// Increments
CUTLASS_HOST_DEVICE
VectorFragmentIterator &operator++() {
add_offset(1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
iterator_.set_index(idx);
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/warp/vector_fragment_iterator.h/0 | {
"file_path": "include/cutlass/transform/warp/vector_fragment_iterator.h",
"repo_id": "include",
"token_count": 2738
} | 38 |
# CuTe TMA Tensors
Along your travels, you may find strange looking CuTe Tensors that are printed as something like
```
ArithTuple(0,_0,_0,_0) o ((_128,_64),2,3,1):((_1@0,_1@1),_64@1,_1@2,_1@3)
```
What is an `ArithTuple`? Are those tensor strides? What do those mean? What is this for?
This documentation intends to answer those questions and introduce some of the more advanced features of CuTe.
# Introduction to TMA instructions
The Tensor Memory Accelerator (TMA) is a set of instructions for copying possibly multidimensional arrays between global and shared memory. TMA was introduced in the Hopper architecture. A single TMA instruction can copy an entire tile of data all at once. As a result, the hardware no longer needs to compute individual memory addresses and issue a separate copy instruction for each element of the tile.
To accomplish this, the TMA instruction is given a *TMA descriptor*, which is a packed representation of a multidimensional tensor in global memory with 1, 2, 3, 4, or 5 dimensions. The TMA descriptor holds
* the base pointer of the tensor;
* the data type of the tensor's elements (e.g., `int`, `float`, `double`, or `half`);
* the size of each dimension;
* the stride within each dimension; and
* other flags representing the smem box size, smem swizzling patterns, and out-of-bounds access behavior.
This descriptor must be created on the host before kernel execution.
It is shared between all thread blocks that will be issuing TMA instructions.
Once inside the kernel, the TMA is executed with the following parameters:
* pointer to the TMA descriptor;
* pointer to the SMEM; and
* coordinates into the GMEM tensor represented within the TMA descriptor.
For example, the interface for TMA-store with 3-D coordinates looks like this.
```cpp
struct SM90_TMA_STORE_3D {
CUTE_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) {
// ... invoke CUDA PTX instruction ...
}
};
```
We observe that the TMA instruction does not directly consume pointers to global memory. Indeed, the global memory pointer is contained in the descriptor, is considered constant, and is NOT a separate parameter to the TMA instruction. Instead, the TMA consumes TMA coordinates into the TMA's view of global memory that is defined in the TMA descriptor.
That means that an ordinary CuTe Tensor that stores a GMEM pointer and computes offsets and new GMEM pointers is useless to the TMA.
What do we do?
# Building a TMA Tensor
## Implicit CuTe Tensors
All CuTe Tensors are compositions of Layouts and Iterators. An ordinary global memory tensor's iterator is its global memory pointer. However, a CuTe Tensor's iterator doesn't have to be a pointer; it can be any random-access iterator.
One example of such an iterator is a *counting iterator*.
This represents a possibly infinite sequence of integers that starts at some value.
We call the members of this sequence *implicit integers*,
because the sequence is not explicitly stored in memory.
The iterator just stores its current value.
We can use a counting iterator to create a tensor of implicit integers,
```cpp
Tensor A = make_tensor(counting_iterator<int>(42), make_shape(4,5));
print_tensor(A);
```
which outputs
```
counting_iter(42) o (4,5):(_1,4):
42 46 50 54 58
43 47 51 55 59
44 48 52 56 60
45 49 53 57 61
```
This tensor maps logical coordinates to on-the-fly computed integers. Because it's still a CuTe Tensor, it can still be tiled and partitioned and sliced just like a normal tensor by accumulating integer offsets into the iterator.
But the TMA doesn't consume pointers or integers, it consumes coordinates. Can we make a tensor of implicit TMA
coordinates for the TMA instruction to consume? If so, then we could presumably also tile and partition and slice that tensor of coordinates so that we would always have the right TMA coordinate to give to the instruction.
## ArithTupleIterators and ArithTuples
First, we build a `counting_iterator` equivalent for TMA coordinates. It should support
* dereference to a TMA coordinate, and
* offset by another TMA coordinate.
We'll call this an `ArithmeticTupleIterator`. It stores a coordinate (a tuple of integers) that is represented as an `ArithmeticTuple`. The `ArithmeticTuple` is simply a (public subclass of) `cute::tuple` that has an overloaded `operator+` so that it can be offset by another tuple. The sum of two tuples is the tuple of the sum of the elements.
Now similar to `counting_iterator<int>(42)` we can create an implicit "iterator" (but without increment or other common iterator operations) over tuples that can be dereferenced and offset by other tuples
```cpp
ArithmeticTupleIterator citer_1 = make_inttuple_iter(42, Int<2>{}, Int<7>{});
ArithmeticTupleIterator citer_2 = citer_1 + make_tuple(Int<0>{}, 5, Int<2>{});
print(*citer_2);
```
which outputs
```
(42,7,_9)
```
A TMA Tensor can use an iterator like this to store the current TMA coordinate "offset". The "offset" here is in quotes because it's clearly not a normal 1-D array offset or pointer.
In summary, one creates a TMA descriptor for the *whole global memory tensor*. The TMA descriptor defines a view into that tensor and the instruction takes TMA coordinates into that view. In order to generate and track those TMA coordinates, we define an implicit CuTe Tensor of TMA coordinates that can be tiled, sliced, and partitioned the exact same way as an ordinary CuTe Tensor.
We can now track and offset TMA coordinates with this iterator, but how do we get CuTe Layouts to generate non-integer offsets?
## Strides aren't just integers
Ordinary tensors have a layout that maps
a logical coordinate `(i,j)` into a 1-D linear index `k`.
This mapping is the inner-product of the coordinate with the strides.
TMA Tensors hold iterators of TMA coordinates.
Thus, a TMA Tensor's Layout must map a logical coordinate
to a TMA coordinate, rather than to a 1-D linear index.
To do this, we can abstract what a stride is. Strides need not be integers, but rather any algebraic object that supports inner-product with the integers (the logical coordinate). The obvious choice is the `ArithmeticTuple` we used earlier since they can be added to each other, but this time additionally equipped with an `operator*` so it can also be scaled by an integer.
### Aside: Integer-module strides
A group of objects that support addition between elements and product between elements and integers is called an integer-module.
Formally, an integer-module is an abelian group `(M,+)` equipped with `Z*M -> M`, where `Z` are the integers. That is, an integer-module `M` is
a group that supports inner products with the integers.
The integers are an integer-module.
Rank-R tuples of integers are an integer-module.
In principle, layout strides may be any integer-module.
### Basis elements
CuTe's basis elements live in the header file `cute/numeric/arithmetic_tuple.hpp`.
To make it easy to create `ArithmeticTuple`s that can be used as strides, CuTe defines normalized basis elements using the `E` type alias. "Normalized" means that the scaling factor of the basis element is the compile-time integer 1.
| C++ object | Description | String representation |
| --- | --- | --- |
| `E<>{}` | `1` | `1` |
| `E<0>{}` | `(1,0,...)` | `1@0` |
| `E<1>{}` | `(0,1,0,...)` | `1@1` |
| `E<0,1>{}` | `((0,1,0,...),0,...)` | `1@1@0` |
| `E<1,0>{}` | `(0,(1,0,...),0,...)` | `1@0@1` |
The "description" column in the above table
interprets each basis element as an infinite tuple of integers,
where all the tuple's entries not specified by the element's type are zero.
We count tuple entries from left to right, starting with zero.
For example, `E<1>{}` has a 1 in position 1: `(0,1,0,...)`.
`E<3>{}` has a 1 in position 3: `(0,0,0,1,0,...)`.
Basis elements can be *nested*.
For instance, in the above table, `E<0,1>{}` means that
in position 0 there is a `E<1>{}`: `((0,1,0,...),0,...)`.
Basis elements can be *scaled*.
That is, they can be multiplied by an integer *scaling factor*.
For example, in `5*E<1>{}`, the scaling factor is `5`.
`5*E<1>{}` prints as `5@1` and means `(0,5,0,...)`.
The scaling factor commutes through any nesting.
For instance, `5*E<0,1>{}` prints as `5@1@0`
and means `((0,5,0,...),0,...)`.
Basis elements can also be added together,
as long as their hierarchical structures are compatible.
For example, `3*E<0>{} + 4*E<1>{}` results in `(3,4,0,...)`.
Intuitively, "compatible" means that
the nested structure of the two basis elements
matches well enough to add the two elements together.
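The following is an illustrative sketch in the snippet style used above (not taken from the CUTLASS examples); it assumes the header mentioned earlier is included and that the printed forms follow the string representations in the table:
```cpp
auto s = 5 * E<1>{};                 // scaled basis element, prints as 5@1
auto n = 3 * E<0,1>{};               // nested and scaled,    prints as 3@1@0
auto t = 3 * E<0>{} + 4 * E<1>{};    // sum of compatible elements: the tuple (3,4)
print(t);                            // expected to print along the lines of (3,4)
```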
### Linear combinations of strides
Layouts work by taking the inner product
of the natural coordinate with their strides.
For strides made of integer elements, e.g., `(1,100)`,
the inner product of the input coordinate `(i,j)`
and the stride is `i + 100j`.
Offsetting an "ordinary" tensor's pointer and this index
gives the pointer to the tensor element at `(i,j)`.
For strides of basis elements, we still compute the inner product of the natural coordinate with the strides.
For example, if the stride is `(1@0,1@1)`,
then the inner product of the input coordinate `(i,j)`
with the strides is `i@0 + j@1 = (i,j)`.
That translates into the (TMA) coordinate `(i,j)`.
If we wanted to reverse the coordinates,
then we could use `(1@1,1@0)` as the stride.
Evaluating the layout would give `i@1 + j@0 = (j,i)`.
A linear combination of basis elements
can be interpreted as a possibly multidimensional and hierarchical coordinate.
For instance, `2*2@1@0 + 3*1@1 + 4*5@1 + 7*1@0@0`
means `((0,4,...),0,...) + (0,3,0,...) + (0,20,0,...) + ((7,...),...) = ((7,4,...),23,...)`
and can be interpreted as the coordinate `((7,4),23)`.
Thus, linear combinations of these strides can be used to generate TMA coordinates.
These coordinates, in turn, can be used to offset TMA coordinate iterators.
## Application to TMA Tensors
Now we can build CuTe Tensors like the one seen in the introduction.
```cpp
Tensor a = make_tensor(make_inttuple_iter(0,0),
make_shape ( 4, 5),
make_stride(E<0>{}, E<1>{}));
print_tensor(a);
Tensor b = make_tensor(make_inttuple_iter(0,0),
make_shape ( 4, 5),
make_stride(E<1>{}, E<0>{}));
print_tensor(b);
```
prints
```
ArithTuple(0,0) o (4,5):(_1@0,_1@1):
(0,0) (0,1) (0,2) (0,3) (0,4)
(1,0) (1,1) (1,2) (1,3) (1,4)
(2,0) (2,1) (2,2) (2,3) (2,4)
(3,0) (3,1) (3,2) (3,3) (3,4)
ArithTuple(0,0) o (4,5):(_1@1,_1@0):
(0,0) (1,0) (2,0) (3,0) (4,0)
(0,1) (1,1) (2,1) (3,1) (4,1)
(0,2) (1,2) (2,2) (3,2) (4,2)
(0,3) (1,3) (2,3) (3,3) (4,3)
```
| media/docs/cute/0z_tma_tensors.md/0 | {
"file_path": "media/docs/cute/0z_tma_tensors.md",
"repo_id": "media",
"token_count": 3694
} | 39 |
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Terminology")
[README](../../README.md#documentation) > **Terminology**
# CUTLASS Terminology
**cute::Layout**: A `cute::Layout` vocabulary type composed of the hierarchical `cute::Shape` and `cute::Stride`
tuples that is used throughout CUTLASS 3.0 to represent and manipulate thread and data layouts. More details are included in the [CuTe specific tensor type documentation](/media/docs/cute/03_tensor.md).
**cute::Tensor**: A pointer backed by a `cute::Layout` used to represent a tensor. More details are included in the [CuTe specific tensor type documentation](/media/docs/cute/03_tensor.md).
**Capacity**: (scalar) physical number of elements in memory required to store a multidimensional object; expressed as the type's LongIndex type
- example: the capacity of a column-major matrix is `lda * N`
**Element**: data type describing one item in a multidimensional tensor, array, or matrix
**Extent**: (vector-valued quantity) the logical size of each dimension of a multidimensional index space. Consistent with the [C++ Standard Library](https://en.cppreference.com/w/cpp/types/extent).
- `Coord<N> extent()`
- `Index extent(int dim)`
**Fragment**: a register-backed array of elements used to store a thread's part of a tile
**Index**: signed integer representing quantities aligned with a logical dimension
**Layout**: functor mapping logical coordinates of a tensor to linear offset (as LongIndex); owns stride vectors, if any.
**LongIndex**: signed integer representing offsets in memory; typically wider than Index type
**Numeric Type**: a CUTLASS data type used to represent real-valued quantities; is trivially copyable.
**Pitch Linear**: linear memory allocation obtained from a user-defined 2-D size, which specifies the
contiguous and strided dimensions of a tile.
**Planar Complex**: representation of complex tensors as two real-valued tensors, with real elements in one part and imaginary elements in another part of identical layout, separated by an offset
**Policy**: additional details extending the interface of a template guiding internal implementation;
typically used to target specific design points known to be efficient
**Rank**: number of dimensions in a multidimensional index space, array, tensor, or matrix. Consistent with
[C++ Standard Library](https://en.cppreference.com/w/cpp/types/rank)
**Register**: in device code, registers are the most efficient storage for statically sized arrays of elements.
Arrays may be expected to be stored in registers if all accesses are made via constexpr indices or within
fully unrolled loops.
**Residue**: partial tile or matrix computation which may require special accommodation for functional correctness or performance
**Size**: (scalar) number of logical elements in a tensor; equal to the product of each member of `extent()`
- `LongIndex size()`
`sizeof_bits<T>::value` - template pattern returning the size of a numeric type or array in units of bits
**Storage**: when appropriate, refers to some alternative type used to store a packed collection of elements;
may be used to handle bit-level packing or make types safe for use in unions
**TensorRef**: contains base pointer and _Layout_ object for referencing infinitely-sized tensor object
**TensorView**: contains _TensorRef_ and extent of a finite mathematical object
**Tile**: partitions of a tensor that have constant extents and layout known at compile time
**Trait**: characteristics of a fully-specialized type, typically used in metaprogramming reflection
**View**: an object containing references to a data structure that it does not own; typically, construction of views is lightweight
**Warp**: a collection of hardware threads executing in lock-step; warp-level operations typically rely on cooperation among the threads within the warp
`AlignedBuffer<T, N>`: statically sized array type; union-safe, no construction guarantee for elements
`Array<T, N>`: container for holding numeric types - handles bit packing for small numeric types (e.g. int4_t, uint4_t, bin1_t)
`sizeof(Array<T, N>)` - gives expected value in units of bytes with minimum storage of `1 B`: (sizeof_bits<T>::value * N) / 8
**Operator**: an object performing a computation on matrix or tensor objects. May be further refined by scope within the execution model hierarchy. Deprecated starting CUTLASS 3.0,
replaced by [MMA and Copy atoms from CuTe](/media/docs/cute/0t_mma_atom.md).
**Tile Iterator**: abstraction for accessing and traversing a sequence of tiles in a tensor; CUTLASS specifies
[formal concepts for tile iterators](tile_iterator_concept.md). Deprecated starting CUTLASS 3.0.
Replaced by `cute::Layout` in equivalent usage scenarios to represent data tensors.
**Thread Map**: abstraction for defining how threads are mapped to a given tile. Deprecated starting CUTLASS 3.0.
Replaced by `cute::Layout` in equivalent usage scenarios to represent thread tensors.
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| media/docs/terminology.md/0 | {
"file_path": "media/docs/terminology.md",
"repo_id": "media",
"token_count": 1752
} | 40 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Common data types and string names/tags for them
"""
import enum
from cutlass_library import (
ComplexTransform,
DataType,
DataTypeSize,
EpilogueScheduleType,
KernelScheduleType,
MathOperation,
OpcodeClass,
TileSchedulerType
)
# The following block implements enum.auto() for Python 3.5 variants that don't include it such
# as the default 3.5.2 on Ubuntu 16.04.
#
# https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility
try:
from enum import auto as enum_auto
except ImportError:
__cutlass_library_auto_enum = 0
def enum_auto() -> int:
global __cutlass_library_auto_enum
i = __cutlass_library_auto_enum
__cutlass_library_auto_enum += 1
return i
class DataTypeSizeBytes:
"""
Static class to mimic the `DataTypeSize` dictionary, but with checks for whether the
data type key is less than a full byte or a non-integer number of bytes.
"""
@staticmethod
def __class_getitem__(datatype):
"""
Returns the size of the data type in bytes. Raises an exception if the data type
is either less than a full byte or a non-integer number of bytes in size.
:param datatype: data type to query
:return: number of bytes the data type occupies
:rtype: int
"""
bits = DataTypeSize[datatype]
if bits < 8:
raise Exception(
f"Data type {datatype} is less than one byte in size."
)
elif bits % 8 != 0:
raise Exception(
f"Data type datatype is not an integer number of bytes."
)
return bits // 8
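# Illustrative usage sketch (not part of the original module):
#
#   DataTypeSizeBytes[DataType.f16]   # -> 2
#   DataTypeSizeBytes[DataType.s4]    # raises: less than one byte in size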
class SchedulerMode(enum.Enum):
Device = enum_auto()
Host = enum_auto()
SchedulerModeTag = {
SchedulerMode.Device: "cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly",
SchedulerMode.Host: "cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute",
}
ShortSchedulerModeNames = {SchedulerMode.Device: "Device", SchedulerMode.Host: "Host"}
class FunctionalOp(enum.Enum):
AtomicAdd = enum_auto()
AtomicMaximum = enum_auto()
Divides = enum_auto()
Maximum = enum_auto()
Minimum = enum_auto()
Minus = enum_auto()
Multiplies = enum_auto()
MultiplyAdd = enum_auto()
Plus = enum_auto()
FunctionalOpTag = {
FunctionalOp.AtomicAdd: "cutlass::atomic_add",
FunctionalOp.AtomicMaximum: "cutlass::atomic_maximum",
FunctionalOp.Divides: "cutlass::divides",
FunctionalOp.Maximum: "cutlass::maximum",
FunctionalOp.Minimum: "cutlass::minimum",
FunctionalOp.Minus: "cutlass::minus",
FunctionalOp.Multiplies: "cutlass::multiplies",
FunctionalOp.MultiplyAdd: "cutlass::multiply_add",
FunctionalOp.Plus: "cutlass::plus",
}
class ActivationOp(enum.Enum):
DGelu = enum_auto()
Gelu = enum_auto()
GeluTaylor = enum_auto()
HardSwish = enum_auto()
Identity = enum_auto()
LeakyReLU = enum_auto()
ReLU = enum_auto()
Sigmoid = enum_auto()
SiLU = enum_auto()
Tanh = enum_auto()
ActivationOpTag = {
ActivationOp.DGelu: "cutlass::epilogue::thread::dGELU",
ActivationOp.Gelu: "cutlass::epilogue::thread::GELU",
ActivationOp.GeluTaylor: "cutlass::epilogue::thread::GELU_taylor",
ActivationOp.HardSwish: "cutlass::epilogue::thread::HardSwish",
ActivationOp.Identity: "cutlass::epilogue::thread::Identity",
ActivationOp.LeakyReLU: "cutlass::epilogue::thread::LeakyReLU",
ActivationOp.ReLU: "cutlass::epilogue::thread::ReLu",
ActivationOp.Sigmoid: "cutlass::epilogue::thread::Sigmoid",
ActivationOp.SiLU: "cutlass::epilogue::thread::SiLu",
ActivationOp.Tanh: "cutlass::epilogue::thread::Tanh",
}
def op_tag(op) -> str:
"""
Dispatches `op` to the appropriate *Tag dictionary depending on whether
`op` is an ActivationOp or FunctionalOp. This is useful for cases in which
either type can be used.
:param op: operation to emit a tag for
:type op: ActivationOp | FunctionalOp
:return: tag corresponding to op
:rtype: str
"""
if isinstance(op, ActivationOp):
return ActivationOpTag[op]
elif isinstance(op, FunctionalOp):
return FunctionalOpTag[op]
else:
raise Exception(f"Unexpected op type {op}. Must be one of ActivationOp or FunctionalOp.")
class FloatRoundStyle(enum.Enum):
ToNearest = enum_auto()
ToNearestSatfinite = enum_auto()
Indeterminate = enum_auto()
TowardZero = enum_auto()
TowardInfinity = enum_auto()
TowardNegInfinity = enum_auto()
HalfUlpTruncDntz = enum_auto()
HalfUlpTruncate = enum_auto()
FloatRoundStyleTag = {
FloatRoundStyle.ToNearest: "cutlass::FloatRoundStyle::round_to_nearest",
FloatRoundStyle.ToNearestSatfinite: "cutlass::FloatRoundStyle::round_to_nearest_satfinite",
FloatRoundStyle.Indeterminate: "cutlass::FloatRoundStyle::round_indeterminate",
FloatRoundStyle.TowardZero: "cutlass::FloatRoundStyle::round_toward_zero",
FloatRoundStyle.TowardInfinity: "cutlass::FloatRoundStyle::round_toward_infinity",
FloatRoundStyle.TowardNegInfinity: "cutlass::FloatRoundStyle::round_toward_neg_infinity",
FloatRoundStyle.HalfUlpTruncDntz: "cutlass::FloatRoundStyle::round_half_ulp_trunc_dntz",
FloatRoundStyle.HalfUlpTruncate: "cutlass::FloatRoundStyle::round_half_ulp_truncate",
}
class MathInstruction:
"""
Description of the lowest-level matrix-multiply-accumulate operation to be used in a kernel
"""
def __init__(
self,
instruction_shape,
element_a,
element_b,
element_accumulator,
opcode_class=OpcodeClass.Simt,
math_operation=MathOperation.multiply_add,
):
"""
:param instruction_shape: size of the [M, N, K] dimensions of the instruction
:type instruction_shape: list or tuple
:param element_a: data type of operand A
:param element_b: data type of operand B
:param element_accumulator: data type used in accumulation
:param opcode_class: higher-level class of the instruction (e.g., SIMT or Tensor Core)
:type opcode_class: cutlass_library.library.OpcodeClass
:param math_operation: the type of low-level operation to be performed (e.g., multiply accumulate)
:type math_operation: MathOperation
"""
self.instruction_shape = instruction_shape
self.element_a = element_a
self.element_b = element_b
self.element_accumulator = element_accumulator
self.opcode_class = opcode_class
self.math_operation = math_operation
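# Added for illustration (not part of the original file): a hedged sketch constructing a
# MathInstruction for a 16x8x16 Tensor Core MMA with f16 operands and f32 accumulation.
# DataType, OpcodeClass, and MathOperation are assumed to be in scope via this module's
# cutlass_library imports; the helper name is hypothetical.
def _example_math_instruction():
    return MathInstruction(
        instruction_shape=[16, 8, 16],
        element_a=DataType.f16,
        element_b=DataType.f16,
        element_accumulator=DataType.f32,
        opcode_class=OpcodeClass.TensorOp,
        math_operation=MathOperation.multiply_add,
    )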
class TileDescription:
"""
Description of a tile of computation to be performed in the kernel, encompassing threadblock, cluster, and warp shapes,
stage count, and math instruction specification
"""
def __init__(
self,
threadblock_shape,
stages,
warp_count,
math_instruction,
cluster_shape=[1, 1, 1],
kernel_schedule: KernelScheduleType = None,
epilogue_schedule: EpilogueScheduleType = None,
tile_scheduler: TileSchedulerType = None
):
"""
:param threadblock_shape: shape of a threadblock tile
:type threadblock_shape: list or tuple
:param stages: number of pipeline stages in the operation. For SM90 kernels, this can be set to `None` and the maximum
number of stages that can be supported for an operation on a given architecture will be computed at a later time
:type stages: int or None
:param warp_count: number of warps in each [M, N, K] dimension of a threadblock tile
:type warp_count: list, tuple, or None
:param math_instruction: specification of the instruction type and shape to be performed and the types of its operands
:type math_instruction: MathInstruction
:param cluster_shape: number of threadblocks in the [X, Y, Z] dimensions of a threadblock cluster
:param kernel_schedule: type of kernel schedule to use (only available for SM90+)
:type kernel_schedule: cutlass_library.KernelScheduleType
:param epilogue_schedule: type of epilogue schedule to use (only available for SM90+)
:type epilogue_schedule: cutlass_library.EpilogueScheduleType
:param tile_scheduler: type of tile scheduler to use (only available for SM90+)
:type tile_scheduler: cutlass_library.TileSchedulerType
"""
if ((kernel_schedule is None and epilogue_schedule is not None) or
(kernel_schedule is not None and epilogue_schedule is None)):
raise Exception("Kernel and epilogue schedule must either both be Auto or neither be Auto.")
self.threadblock_shape = threadblock_shape
self.cluster_shape = cluster_shape
self.kernel_schedule = kernel_schedule
self.epilogue_schedule = epilogue_schedule
self.tile_scheduler = tile_scheduler
self.stages = stages
self.math_instruction = math_instruction
self.instruction_shape = math_instruction.instruction_shape
# Number of warps along x, y, z directions
self.warp_count = warp_count
def clone_and_update(self, td: dict):
attrs = {
"cluster_shape": None,
"threadblock_shape": None,
"warp_count": None,
"stages": None,
"instruction_shape": None,
"kernel_schedule": None,
"epilogue_schedule": None,
"tile_scheduler": None
}
for key in attrs.keys():
if key in td.keys():
attrs[key] = td[key]
else:
attrs[key] = getattr(self, key)
attrs["math_instruction"] = MathInstruction(
attrs["instruction_shape"],
self.math_instruction.element_a,
self.math_instruction.element_b,
self.math_instruction.element_accumulator,
self.math_instruction.opcode_class,
self.math_instruction.math_operation
)
# Remove the instruction shape
del attrs["instruction_shape"]
return TileDescription(**attrs)
@property
def num_threads(self):
"""
Returns the number of threads in the threadblock
:return: number of threads in the threadblock
:rtype: int or None (if warp count is None)
"""
if self.warp_count is not None:
threads = 32
for cnt in self.warp_count:
threads *= cnt
return threads
return None
def procedural_name(self):
"""
Returns a name identifying the tile description
:return: name identifying the tile description
:rtype: str
"""
emit_stages = 0 if self.stages is None else self.stages
name = "%dx%dx%d_%dx%d_%dx%d" % (
self.cluster_shape[0],
self.cluster_shape[1],
self.cluster_shape[2],
self.threadblock_shape[0],
self.threadblock_shape[1],
self.threadblock_shape[2],
emit_stages
)
return name
def procedural_name_2x(self):
"""
Returns a name identifying the tile description
:return: name identifying the tile description
:rtype: str
"""
return "%dx%d_%dx%d" % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages)
def __str__(self):
"""
Returns a string containing each of the tile description's values
:return: contents of tile description
:rtype: str
"""
if self.kernel_schedule is not None:
kschedule = self.kernel_schedule
else:
kschedule = KernelScheduleType.ScheduleAuto
if self.epilogue_schedule is not None:
eschedule = self.epilogue_schedule
else:
eschedule = EpilogueScheduleType.ScheduleAuto
if self.tile_scheduler is not None:
tschedule = self.tile_scheduler.name
else:
tschedule = "None"
return f"""
{{
ClusterShape: {self.cluster_shape}
ThreadblockShape: {self.threadblock_shape}
WarpCount: {self.warp_count}
Stages: {self.stages if self.stages is not None else 'Auto'}
InstructionShape: {self.math_instruction.instruction_shape}
Kernel schedule: {kschedule.name}
Epilogue schedule: {eschedule.name}
TileScheduler: {tschedule}
}}"""
class TensorDescription:
def __init__(self, element, layout, alignment=1, complex_transform=ComplexTransform.none):
self.element = element
self.layout = layout
if element != DataType.void:
self.alignment = min(128 // DataTypeSize[self.element], alignment)
else:
self.alignment = alignment
self.complex_transform = complex_transform
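# Added for illustration (not part of the original file): a hedged sketch of the
# alignment clamp above. For an f16 tensor (16-bit elements), a requested alignment of
# 16 elements is reduced to 128 // 16 = 8 so a single access never exceeds 128 bits;
# void-element tensors keep the requested alignment. LayoutType is assumed to be in
# scope via this module's cutlass_library imports, and the helper name is hypothetical.
def _example_tensor_description_alignment():
    desc = TensorDescription(DataType.f16, LayoutType.RowMajor, alignment=16)
    assert desc.alignment == 8
    return desc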
def CalculateSmemUsagePerStage(operation):
"""
Returns the amount of shared memory in bytes consumed in a single stage of a kernel.
:param operation: operation for which the shared memory usage per stage should be computed. If stages are
set via the `operation.tile_description.stages` parameter, that setting is ignored
in the present calculation
:type operation: cutlass.backend.Operation
:return: number of bytes of shared memory consumed by a single stage
:rtype: int
"""
m, n, k = operation.tile_description.threadblock_shape
if operation.operation_kind == OperationKind.Gemm:
stage_barrier_bytes = 32
return (
(DataTypeSize[operation.A.element] * m * k // 8)
+ (DataTypeSize[operation.B.element] * k * n // 8)
+ stage_barrier_bytes
)
else:
raise Exception("Unsupported operation kind {}.".format(operation.operation_kind))
def CalculateSmemUsage(operation):
"""
Returns the amount of shared memory in bytes consumed by a kernel.
:param operation: operation for which the total shared memory usage should be computed
:type operation: cutlass.backend.Operation
:return: number of bytes of shared memory consumed by the kernel across all of its stages
:rtype: int
"""
return operation.tile_description.stages * CalculateSmemUsagePerStage(operation)
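# Worked example of the arithmetic above (illustrative numbers only): for a GEMM with a
# 128x128x32 threadblock tile and f16 A/B operands (DataTypeSize[f16] == 16 bits),
# CalculateSmemUsagePerStage returns
#   (16 * 128 * 32) // 8 + (16 * 32 * 128) // 8 + 32 = 8192 + 8192 + 32 = 16416 bytes,
# and CalculateSmemUsage scales this by tile_description.stages (e.g., 3 stages
# -> 49248 bytes).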
class ApiVersion(enum.Enum):
"""
Differentiate between CUTLASS 2.x and 3.x API versions
"""
v2x = enum_auto()
v3x = enum_auto()
def api_version(arch, opclass, dtype):
"""
Returns whether the architecture, opcode class, and datatype in question require using CUTLASS 2.x
or 3.x for code emission.
:param arch: compute capability of device on which to run
:type arch: int
:param opclass: class of the operation being performed
:type opclass: cutlass_library.OpcodeClass
:param dtype: data type to be used in operation (assumes that ElementA and ElementB are the same)
:type dtype: cutlass_library.DataType
:return: API version to be used in code emission
:rtype: ApiVersion
"""
if (arch >= 90 and
opclass == OpcodeClass.TensorOp and
(dtype != DataType.f64)):
return ApiVersion.v3x
else:
return ApiVersion.v2x
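# Added for illustration (not part of the original file): a hedged sketch of the
# dispatch rule above; the helper name is hypothetical.
def _example_api_version():
    assert api_version(90, OpcodeClass.TensorOp, DataType.f16) == ApiVersion.v3x
    assert api_version(90, OpcodeClass.TensorOp, DataType.f64) == ApiVersion.v2x
    assert api_version(80, OpcodeClass.TensorOp, DataType.f16) == ApiVersion.v2x
    assert api_version(90, OpcodeClass.Simt, DataType.f32) == ApiVersion.v2x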
class EmissionType(enum.Enum):
"""
Tags for whether to emit a kernel- or device-level operation
"""
Kernel = enum_auto()
Device = enum_auto()
| python/cutlass/backend/library.py/0 | {
"file_path": "python/cutlass/backend/library.py",
"repo_id": "python",
"token_count": 6615
} | 41 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Ease-of-use interface for constructing, compiling, and running GEMMs.
The ``Gemm`` interface is meant to allow one to easily instantiate, compile, and run
GEMM operations in CUTLASS via Python, without specifying many configuration parameters.
Under the hood, the interface will select sensible default parameters for the many template
parameters for CUTLASS GEMMs.
Note: optimal performance is not to be expected from this interface. To achieve optimal
performance, one should specify and tune each configuration parameter.
The simplest example of using this interface is the following:
.. highlight:: python
.. code-block:: python
# A, B, C, and D are torch/numpy/cupy tensor objects
plan = cutlass.op.Gemm(A, B, C, D)
plan.run()
One can also use the interface by specifying data types of operands at construction
and using different tensor objects with these data types at runtime:
.. highlight:: python
.. code-block:: python
# The following is shorthand for:
# cutlass.op.Gemm(element_A=torch.float32, element_B=torch.float32,
# element_C=torch.float32, element_D=torch.float32,
# element_accumulator=torch.float32,
# layout=cutlass.LayoutType.RowMajor)
plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
A0 = torch.rand((128, 256), device='cuda')
B0 = torch.rand((256, 64), device='cuda')
C0 = torch.zeros((128, 64), device='cuda')
D0 = torch.zeros((128, 64), device='cuda')
plan.run(A0, B0, C0, D0)
A1 = torch.rand((32, 128), device='cuda')
B1 = torch.rand((128, 256), device='cuda')
C1 = torch.zeros((32, 256), device='cuda')
D1 = torch.zeros((32, 256), device='cuda')
plan.run(A1, B1, C1, D1)
The interface additionally enables one to decouple the compilation of the underlying CUTLASS
kernel from its execution:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
plan.compile()
# Do other work...
plan.run(A0, B0, C0, D0)
# Do other work...
plan.run(A1, B1, C1, D1)
Elementwise activation functions are easily fused to the GEMM via the interface:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
plan.activation = cutlass.epilogue.relu
Operations can also be run asynchronously:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
args = plan.run()
# Do other work...
args.sync()
"""
from math import prod
from cuda import cuda
from cutlass_library import (
DataType,
DataTypeSize,
GemmUniversalMode,
)
import cutlass
from cutlass import epilogue, swizzle
from cutlass.backend import compiler
from cutlass.backend.evt import EpilogueFunctorVisitor
from cutlass.backend.gemm_operation import GemmArguments, GemmOperationUniversal
from cutlass.backend.library import TensorDescription, TileDescription
from cutlass.op.op import OperationBase
from cutlass.shape import GemmCoord
from cutlass.utils import check, datatypes
class Gemm(OperationBase):
"""
Constructs a ``Gemm`` object.
The data types and layouts of operands A, B, and C, along with the data type of output D
and that used for accumulation, are bound to the ``Gemm`` object throughout its lifetime --
these are not to be changed after a ``Gemm`` has been constructed.
The constructor has optional parameters for flexibly setting these parameters. The following
constructors are equivalent:
.. highlight:: python
.. code-block:: python
# Use F32 for A, B, C, D, and accumulation. All operands are row major.
# Use the generic ``element`` and ``layout`` parameters to concisely set all data types and layouts
# for operands to the same values.
Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
# Explicitly specify the data types to use for A, B, C, and D. Use the generic ``layout``.
Gemm(element_A=cutlass.DataType.f32, element_B=cutlass.DataType.f32, element_C=cutlass.DataType.f32,
element_D=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
# Set the data types and elements from existing tensors. Note that one can use different tensors when
# executing GEMM via the ``run()`` method than passed in here (though those passed in to ``run()`` must
# have the same data type and layout as those passed in here).
# A, B, C, and D are row-major torch.Tensor objects of type torch.float32
Gemm(A=A, B=B, C=C, D=D)
# Use the generic ``element`` and explicitly specify the layouts to use for A, B, and C (layout of D is
# the same as that for C, at present)
Gemm(element=cutlass.DataType.f32, layout_A=cutlass.LayoutType.RowMajor,
layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor)
# Explicitly specify the data type and layout for only some of A, B, C, and D. Unspecified data types
# and layouts will inherit those passed in via the generic ``element`` and ``layout``
Gemm(element_A=cutlass.DataType.f32, layout_B=cutlass.LayoutType.RowMajor,
element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
The order of precedence for the setting of the data type and layout for a given operand/output is as follows:
1) If the tensor type is specified (e.g., ``A``), use the data type and layout inferred from this tensor
2) Otherwise, if the data type/layout (e.g., ``element_A``, ``layout_A``) is specified, use those
3) Otherwise, use the generic values (e.g., ``element``, ``layout``)
:param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90
:type cc: int
:param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80
:type kernel_cc: int
:param A: tensor representing data type and layout of operand A
:param B: tensor representing data type and layout of operand B
:param C: tensor representing data type and layout of operand C
:param D: tensor representing data type and layout of operand D
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param element_accumulator: data type to be used in accumulation of the product of operands A and B
:type element_accumulator: cutlass.DataType
:param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type
:type element: cutlass.DataType
:param layout: generic layout type to be used for operands A, B, C, and D
:type layout: cutlass.LayoutType
:param element_A: data type to be used for operand A
:type element_A: cutlass.DataType
:param element_B: data type to be used for operand B
:type element_B: cutlass.DataType
:param element_C: data type to be used for operand C
:type element_C: cutlass.DataType
:param element_D: data type to be used for operand D
:type element_D: cutlass.DataType
:param layout_A: layout of operand A
:type layout_A: cutlass.LayoutType
:param layout_B: layout of operand B
:type layout_B: cutlass.LayoutType
:param layout_C: layout of operand C
:type layout_C: cutlass.LayoutType
:param layout_D: layout of operand D
:type layout_D: cutlass.LayoutType
"""
def __init__(
self, A=None, B=None, C=None, D=None,
alpha=1.0, beta=0.0, element_accumulator=None,
element=None, layout=None,
element_A=None, element_B=None, element_C=None, element_D=None,
layout_A=None, layout_B=None, layout_C=None,
cc: int = None, kernel_cc: int = None
):
super().__init__(cc=cc, kernel_cc=kernel_cc)
self.name = "gemm"
self.compiled = False
elements = []
layouts = []
# Check that at least one of the following is set for each tensor (illustrated assuming tensor A):
# ``A``, ``element_A``, ``element`` and ``A``, ``layout_A``, ``layout``
for elt, lay, tens, name in zip([element_A, element_B, element_C, element_D],
[layout_A, layout_B, layout_C, layout_C],
[A, B, C, D],
["A", "B", "C", "D"]):
if elt is not None and tens is not None:
raise Exception(f'Must not specify both element_{name} and tensor {name}')
if lay is not None and tens is not None:
raise Exception(f'Must not specify both layout_{name} and tensor {name}')
if elt is None and tens is None and element is None:
raise Exception(f'Must specify one of element_{name}, tensor {name}, or generic element.')
if lay is None and tens is None and layout is None:
raise Exception(f'Must specify one of layout_{name}, tensor {name}, or generic layout.')
elt_to_set = None
lay_to_set = None
if tens is not None:
elt_to_set, lay_to_set = datatypes.get_datatype_and_layout(tens)
else:
elt_to_set = elt if elt is not None else element
lay_to_set = lay if lay is not None else layout
elements.append(datatypes.library_type(elt_to_set))
layouts.append(lay_to_set)
self._element_a, self._element_b, self._element_c, self._element_d = elements
self._layout_a, self._layout_b, self._layout_c, self._layout_d = layouts
if element_accumulator is None:
self._element_accumulator = self._element_c
else:
self._element_accumulator = datatypes.library_type(element_accumulator)
self.A = A
self.B = B
self.C = C
self.D = D
self.alpha = alpha
self.beta = beta
self.epilogue_functor = None
self.op_class = None
self._tile_description = None
self._reset_operations()
self._swizzling_functor = cutlass.swizzle.IdentitySwizzle1
def _reset_operations(self, reset_epilogue: bool = True):
# Set the default op class
datatype_comb = (self._element_a, self._element_b, self._element_accumulator)
layout_comb = (self._layout_a, self._layout_b)
self.possible_op_classes = self.options.supporting_opclasses(
self._element_a, self._element_b, self._element_accumulator,
self._layout_a, self._layout_b, self._math_operation)
if cutlass.OpcodeClass.TensorOp in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.TensorOp
elif cutlass.OpcodeClass.Simt in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.Simt
else:
if self._math_operation is not None:
math_op_str = f' and math operation {self._math_operation}'
else:
math_op_str = ''
raise Exception(f'No kernel configuration found for supported data type and layout '
f'combination {datatype_comb}x{layout_comb}{math_op_str}')
if reset_epilogue:
self._reset_epilogue_functor_activation(cutlass.epilogue.identity)
@property
def swizzling_functor(self):
"""
Returns the type of the swizzling functor currently being used by the GEMM
:return: swizzling functor type
"""
return self._swizzling_functor
@swizzling_functor.setter
def swizzling_functor(self, swizzling_functor):
"""
Sets the swizzling functor to the type specified by `swizzling_functor`
"""
if swizzling_functor == cutlass.swizzle.ThreadblockSwizzleStreamK:
if self.op_class == cutlass.OpcodeClass.Simt:
raise Exception('ThreadblockSwizzleStreamK is currently only supported with opcode class TensorOp')
if self.current_cc == 90:
raise Exception('ThreadblockSwizzleStreamK is currently unsupported on SM90')
self._swizzling_functor = swizzling_functor
#
# Tile description Related
#
@property
def tile_description(self) -> TileDescription:
"""
Returns the tile description
"""
return self._tile_description
@tile_description.setter
def tile_description(
self, td=None):
"""
Set the tile description
:param td: tile description
:type td: cutlass.backend.TileDescription, or a dict with keys
{
"threadblock_shape": [int, int, int],
"warp_count": [int, int, int],
"stages": int,
"instruction_shape": [int, int, int] (optional),
"cluster_shape": [int, int, int] (optional)
}
"""
if td is None:
return
if isinstance(td, dict):
if self._tile_description is None:
op = self.possible_operations.default_operation(self._math_operation)
self._tile_description = datatypes.td_from_profiler_op(op)
td = self._tile_description.clone_and_update(td)
valid, msg = self._valid_tile_description(td)
if valid:
self._tile_description = td
else:
raise Exception(msg)
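# Illustrative usage (comments only, added for this write-up): the setter accepts either
# a full TileDescription or a dict of overrides, e.g.
#   plan.tile_description = {
#       "threadblock_shape": [128, 128, 32],
#       "warp_count": [2, 2, 1],
#       "stages": 3,
#   }
# Keys that are omitted are inherited from the currently selected default operation via
# clone_and_update.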
def _valid_tile_description(self, td: TileDescription) -> tuple:
"""
Checks whether the provided tile description is valid for the given compute capability. At present,
this checks the following:
- Does the tile description use a number of stages supported by the compute capability in question?
- Does the tile size requested fit within shared memory?
- Are cluster dimensions outside the valid range requested for a given architecture (e.g.,
more non-unit cluster dimensions for pre-SM90 architectures)?
- Is the kernel schedule being used supported on the architecture in question?
:param td: tile description to validate
:type td: cutlass.backend.TileDescription
:return: tuple in which the first element is a bool indicating that the tile description is valid
and the second element is a string providing an optional error message.
:rtype: tuple
"""
valid, msg = check.valid_stage_count(self.cc, self.current_cc, td, self._element_c, self._element_d)
if not valid:
return (valid, msg)
valid, msg = check.valid_cluster_shape(self.current_cc, td.cluster_shape)
if not valid:
return (valid, msg)
valid, msg = check.valid_schedule(self.current_cc, td.kernel_schedule, td.epilogue_schedule, td.tile_scheduler)
return valid, msg
def tile_descriptions(self) -> list:
"""
Returns a list of valid tile descriptions for the operations
:returns: list of valid tile descriptions for the operations
:rtype: list
"""
tds = [datatypes.td_from_profiler_op(op) for op in self.possible_operations.all_operations]
if self._math_operation is not None:
tds = [td for td in tds if td.math_instruction.math_operation == self._math_operation]
return tds
def construct(
self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None) -> GemmOperationUniversal:
"""
Constructs a ``cutlass.backend.GemmUniversalOperation`` based on the input parameters and current
kernel specification of the ``Gemm`` object.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:return: operation that was constructed
:rtype: cutlass.backend.GemmOperationUniversal
"""
alignment_pref_A = min(128 // DataTypeSize[self._element_a], max(self.possible_operations.alignments("A")))
alignment_pref_B = min(128 // DataTypeSize[self._element_b], max(self.possible_operations.alignments("B")))
alignment_A = check.alignment_or_default(alignment_A, alignment_pref_A)
alignment_B = check.alignment_or_default(alignment_B, alignment_pref_B)
tensor_A = TensorDescription(self._element_a, self._layout_a, alignment_A)
tensor_B = TensorDescription(self._element_b, self._layout_b, alignment_B)
if alignment_C is None:
alignment_C = max(self.possible_operations.alignments("C"))
if self._element_c != DataType.void:
alignment_C = min(128 // DataTypeSize[self._element_c], alignment_C)
if tile_description is None:
if self._tile_description is None:
op = self.possible_operations.operations(alignment_A, alignment_B, alignment_C, self._math_operation)[0]
tile_description = datatypes.td_from_profiler_op(op)
# The selected op may have lower alignment than that determined above, so we must
# reset alignment here.
alignment_C = op.C.alignment
else:
tile_description = self._tile_description
else:
valid, err_str = self._valid_tile_description(tile_description)
if not valid:
raise Exception(f"Invalid tile description. {err_str}")
self._tile_description = tile_description
tensor_C = TensorDescription(self._element_c, self._layout_c, alignment_C)
self.epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, self.epilogue_functor)
operation = GemmOperationUniversal(
arch=self.current_cc,
tile_description=tile_description,
A=tensor_A, B=tensor_B, C=tensor_C,
epilogue_functor=self.epilogue_functor,
swizzling_functor=self._swizzling_functor,
)
return operation
def compile(self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None,
print_module: bool = False) -> cutlass.backend.GemmOperationUniversal:
"""
Emits and compiles the kernel currently specified. If ``tile_description`` and any
of the ``alignment`` parameters are set, the kernel will be chosen using this
tile description and alignments. Otherwise, a default tile description and alignment
will be used.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:return: operation that was compiled
:rtype: cutlass.backend.GemmOperationUniversal
"""
self.operation = self.construct(tile_description, alignment_A, alignment_B, alignment_C)
if print_module:
print(self.operation.rt_module.emit())
compiler.add_module([self.operation,])
return self.operation
def _verify_rank(self, tensor):
"""
Verifies that ``tensor`` has rank greater than 1
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
"""
if len(tensor.shape) < 2:
raise Exception(f"Tensors must be of rank greater than 1. Received tensor of shape: {tensor.shape}")
def _get_batch_count(self, A, B, C, D) -> int:
"""
Returns the batch count specified by the tensors A, B, C, and D and verifies that these
tensors match in batch size. Presence of a batch dimension is detected by one of the
tensors being rank 3. If a batch dimension is present, it must be present in one of
operands A, B, or C (but need not be in all), and must be present in D.
:param A: tensor A
:type A: numpy/cupy/torch array/tensor object
:param B: tensor B
:type B: numpy/cupy/torch array/tensor object
:param C: tensor C
:type C: numpy/cupy/torch array/tensor object
:param D: tensor D
:type D: numpy/cupy/torch array/tensor object
:return: batch count shared by the batched operands
:rtype: int
"""
A_batch = prod(A.shape[:-2]) if len(A.shape) > 2 else 1
B_batch = prod(B.shape[:-2]) if len(B.shape) > 2 else 1
if 1 not in [A_batch, B_batch]:
if A_batch != B_batch:
raise Exception(f"Get invalid batch counts: A={A_batch}, B={B_batch}")
return max(A_batch, B_batch)
def _get_batch_stride(self, tensor) -> int:
"""
Returns the batch stride of ``tensor``. If ``tensor`` is only rank-2, batch stride is 0.
:param tensor: tensor object to process
:type tensor: numpy/cupy/torch array/tensor object
:return: stride between each matrix in the batch
:rtype: int
"""
if tensor is not None and len(tensor.shape) > 2:
return tensor.shape[-2] * tensor.shape[-1]
else:
return 0
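# Illustrative example (comments only, added for this write-up): for a batched A of shape
# (4, 128, 64) and an unbatched B of shape (64, 256), _get_batch_count returns 4, while
# the batch strides are 128 * 64 = 8192 elements for A and 0 for the rank-2 tensor B.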
def _get_problem_args(self, A, B, C, D) -> tuple:
"""
Returns the problem size and GEMM universal mode to use for the
given operands.
:param A: tensor A
:type A: numpy/cupy/torch array/tensor object
:param B: tensor B
:type B: numpy/cupy/torch array/tensor object
:param C: tensor C
:type C: numpy/cupy/torch array/tensor object
:param D: tensor D
:type D: numpy/cupy/torch array/tensor object
:return: tuple containing the problem size (cutlass.shape.GemmCoord), the GEMM mode (cutlass.GemmUniversalMode), and the batch count (int)
:rtype: tuple
"""
M, K = A.shape[-2:]
N = B.shape[-1]
mode = GemmUniversalMode.Gemm
batch_count = self._get_batch_count(A, B, C, D)
returned_batch_count = batch_count
# If we are running a batched GEMM in which there is a nonzero batch stride
# only for A, then we can fold the batched dimension of A into the M dimension
# (i.e., (b, m, k) x (k, n) -> (m*b, k) x (k, n)). This works only if both A
# and C are row major. A similar operation can be performed if only B has a nonzero
# batch dimension
if batch_count > 1:
A_row = self._layout_a == cutlass.LayoutType.RowMajor
B_row = self._layout_b == cutlass.LayoutType.RowMajor
C_row = self._layout_c == cutlass.LayoutType.RowMajor
# Consider a Tensor to be batched if its rank is > 2 and
# the product of the modes beyond rank 2 equals our pre-determined batch size.
batched = lambda x : x is None or (len(x.shape) > 2 and prod(x.shape[:-2]) == batch_count)
if batched(A) and not batched(B) and (C is None or batched(C)) and A_row and C_row:
M *= batch_count
returned_batch_count = 1
elif not batched(A) and batched(B) and (C is None or batched(C)) and not B_row and not C_row:
N *= batch_count
returned_batch_count = 1
else:
mode = GemmUniversalMode.Batched
return GemmCoord(M, N, K), mode, returned_batch_count
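# Illustrative example (comments only, added for this write-up) of the folding rule above:
# with row-major A of shape (4, 128, 64), row-major B of shape (64, 256), and row-major
# C/D of shape (4, 128, 256), only A/C/D carry the batch dimension, so the batch folds
# into M and this returns GemmCoord(512, 256, 64) with mode GemmUniversalMode.Gemm and a
# batch count of 1. If B were batched as well, the mode would instead be
# GemmUniversalMode.Batched with a batch count of 4.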
def _verify_type_and_layout(self, tensor, ref_type, ref_layout, name):
"""
Verifies that ``tensor`` has data type ``ref_type`` and layout ``ref_layout``. An exception
is raised if it does not.
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
:param ref_type: data type for the tensor that this object was initialized to
:param ref_layout: layout for the tensor that this object was initialized to
:param name: identifier of the tensor to verify. Used in raising exceptions
:type name: str
"""
dtype, layout = datatypes.get_datatype_and_layout(tensor)
if dtype != ref_type or layout != ref_layout:
try:
# Attempt to transpose the tensor to fit the desired layout
tensor = tensor.transpose(-1, -2)
except:
raise Exception(f'Tensor {name} with type and layout ({dtype}, {layout}) '
f'does not match the expected type and '
f'layout of ({ref_type}, {ref_layout}) and transpose failed.')
def run(self, A=None, B=None, C=None, D=None,
alpha=None, beta=None, sync: bool = True, print_module: bool = False, visitor_args: dict = None,
stream: cuda.CUstream = cuda.CUstream(0)) -> GemmArguments:
"""
Runs the kernel currently specified. If it has not already been, the kernel is emitted and
compiled. Tensors holding operands and outputs of the kernel are sourced either from the
``A``, ``B``, ``C``, ``D``, ``alpha``, and ``beta``
parameters provided in this call, or from those
passed in on the construction of this object -- one of the two must be specified.
By default, this call returns only once the kernel has completed. To launch the kernel
and immediately return, set ``sync=False``. In this case, it is the responsibility of the
caller to synchronize the results of the kernel before attempting to access outputs
by calling ``sync()`` on the arguments returned from this call.
:param A: tensor representing data type and layout of operand A
:param B: tensor representing data type and layout of operand B
:param C: tensor representing data type and layout of operand C
:param D: tensor representing data type and layout of operand D
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param sync: whether the call should wait for the kernel to complete before returning
:type sync: bool
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
:return: arguments passed in to the kernel
:rtype: cutlass.backend.GemmArguments
"""
super().run_setup()
A = self._verify_tensor(A, self.A, self._element_a, self._layout_a, "A")
B = self._verify_tensor(B, self.B, self._element_b, self._layout_b, "B")
C = self._verify_tensor(C, self.C, self._element_c, self._layout_c, "C")
D = self._verify_tensor(D, self.D, self._element_d, self._layout_d, "D")
alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha")
beta = self._verify_scalar(beta, self.beta, self._element_c, "beta")
is_void_c = self._element_c == DataType.void
self._verify_rank(A)
self._verify_rank(B)
if not is_void_c:
self._verify_rank(C)
self._verify_rank(D)
alignment_a = self.possible_operations.find_alignment(A.shape, self._layout_a, operand="A")
alignment_b = self.possible_operations.find_alignment(B.shape, self._layout_b, operand="B")
# Set C alignment based on D.shape so as to correctly get an alignment with void-C
# kernels, for which `C` is None.
alignment_c = self.possible_operations.find_alignment(D.shape, self._layout_c, operand="C")
self.compile(self._tile_description, alignment_A=alignment_a, alignment_B=alignment_b,
alignment_C=alignment_c, print_module=print_module)
problem_size, mode, batch_count = self._get_problem_args(A, B, C, D)
if mode == GemmUniversalMode.Gemm or batch_count == 1:
kwargs = {'split_k_slices': 1}
else:
kwargs = {
'batch': batch_count,
'batch_strides': {
'A': self._get_batch_stride(A),
'B': self._get_batch_stride(B),
'C': self._get_batch_stride(C),
'D': self._get_batch_stride(D)
}
}
kwargs['stream'] = stream
if isinstance(self.epilogue_functor, EpilogueFunctorVisitor):
output_op = self.operation.epilogue_type(visitor_args)
else:
output_op = self.operation.epilogue_type(alpha, beta)
arguments = GemmArguments(
operation=self.operation, problem_size=problem_size,
A=A, B=B, C=C, D=D,
output_op=output_op,
gemm_mode=mode,
**kwargs
)
self.operation.run(arguments)
if sync:
arguments.sync()
return arguments
| python/cutlass/op/gemm.py/0 | {
"file_path": "python/cutlass/op/gemm.py",
"repo_id": "python",
"token_count": 12897
} | 42 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for filtering CUTLASS library kernels and emitting library initialization
and building code
"""
import enum
import logging
import os.path
import shutil
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
from cutlass_library.gemm_operation import *
from cutlass_library.rank_k_operation import *
from cutlass_library.rank_2k_operation import *
from cutlass_library.trmm_operation import *
from cutlass_library.symm_operation import *
from cutlass_library.conv2d_operation import *
from cutlass_library.conv3d_operation import *
except ImportError:
from library import *
from gemm_operation import *
from rank_k_operation import *
from rank_2k_operation import *
from trmm_operation import *
from symm_operation import *
from conv2d_operation import *
from conv3d_operation import *
###################################################################################################
_LOGGER = logging.getLogger(__name__)
class EmitOperationKindAll:
"""
Emit the OperationKind-level CUTLASS library initialization code.
The code is generated in the {generated_path}/{operation_kind} directory
(e.g., tools/library/generated/gemm in the build directory,
for OperationKind=Gemm), in the all_{operation_kind}_operations.cu file
(e.g., all_gemm_operations.cu for OperationKind=Gemm).
That file declares several functions in namespace cutlass::library.
The functions all have this form,
void initialize_{configuration_name}(Manifest& manifest);
The file also _defines_ the following function in that namespace.
void initialize_all_{operation_kind}_operations(Manifest& manifest);
That function calls all of the functions declared in this file.
Those functions are defined in subdirectories
(which this class does not create).
"""
def __init__(self, generated_path, kind, args):
self.generated_path = generated_path
self.kind = kind
self.args = args
self.header_template ="""
/*
Generated by manifest.py - Do not edit.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.entry_template = """
//
// Entry point to construct operations
//
void initialize_all_${operation_name}_operations(Manifest &manifest) {
"""
self.configuration_prototype_template = "void initialize_${configuration_name}(Manifest &manifest);\n"
self.configuration_template =" initialize_${configuration_name}(manifest);\n"
self.epilogue_template ="""}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
"""
#
def __enter__(self):
_LOGGER.debug("*** EmitOperationKindAll::__enter__")
self.operation_path = os.path.join(self.generated_path, OperationKindNames[self.kind])
_LOGGER.debug('*** operation_path (directory to create): ' +
str(self.operation_path));
os.makedirs(self.operation_path, exist_ok=True)
self.top_level_path = os.path.join(self.operation_path, f"all_{OperationKindNames[self.kind]}_operations.cu")
_LOGGER.debug(f"*** top_level_path (file to write): {str(self.top_level_path)}")
self.top_level_file = open(self.top_level_path, "w")
self.top_level_file.write(self.header_template)
self.source_files = [self.top_level_path,]
self.configurations = []
return self
#
def emit(self, operations):
_LOGGER.debug('*** EmitOperationKindAll::emit')
_LOGGER.debug(f"*** len(operations): {len(operations)}")
_LOGGER.debug(f"*** min_cc list: {sorted(min_cc for min_cc, _ in operations.items())}")
for min_cc, configurations in sorted(operations.items()):
_LOGGER.debug(f"*** min_cc={min_cc}")
for configuration_name, _ in configurations.items():
_LOGGER.debug(f"*** configuration_name={configuration_name}")
self.configurations.append(configuration_name)
self.top_level_file.write(SubstituteTemplate(self.configuration_prototype_template, {'configuration_name': configuration_name} ))
#
def __exit__(self, exception_type, exception_value, traceback):
_LOGGER.debug("*** EmitOperationKindAll::__exit__")
self.top_level_file.write(SubstituteTemplate(self.entry_template, {'operation_name': OperationKindNames[self.kind]}))
for configuration_name in self.configurations:
self.top_level_file.write(SubstituteTemplate(self.configuration_template, {'configuration_name': configuration_name}))
self.top_level_file.write(self.epilogue_template)
self.top_level_file.close()
class EmitOperationKindLibrary:
"""
Emit the CUTLASS library initialization code for each OperationKind.
The code is generated in the directory
{generated_path}/{operation_kind}/{min_cc}
(e.g., tools/library/generated/gemm/90 in the build directory,
for min_cc=90 and OperationKind=Gemm), in the file
all_sm{min_cc}_{operation_kind}_operations.cu
(e.g., all_sm90_gemm_operations.cu for min_cc=90 and OperationKind=Gemm).
The min_cc variable here indicates the minimum GPU architecture version
that the things to be initialized require.
For example, min_cc=90 indicates sm90.
That file declares several functions in namespace cutlass::library.
The functions all have this form,
void initialize_all_sm{min_cc}_{subclass_name}_{extended_name}_operations(Manifest& manifest);
where extended_name is operation.extended_name() for all the operations
given to the emit method (which see below). (All operations for a given
configuration_name are guaranteed to have the same extended_name().)
The file also _defines_ the following function in that namespace.
void initialize_all_sm{min_cc}__{operation_kind}_operations(Manifest& manifest);
That function calls all of the functions declared in this file.
Those functions are defined in subdirectories.
The mapping from OperationKind to emitter handles the details
of what happens in each of those subdirectories.
"""
def __init__(self, generated_path, min_cc, kind, args):
self.generated_path = generated_path
self.min_cc = min_cc
self.kind = kind
self.args = args
self.emitters = {
OperationKind.Gemm: EmitGemmConfigurationLibrary,
OperationKind.Conv2d: EmitConv2dConfigurationLibrary,
OperationKind.Conv3d: EmitConv3dConfigurationLibrary,
OperationKind.RankK: EmitRankKConfigurationLibrary,
OperationKind.Rank2K: EmitRank2KConfigurationLibrary,
OperationKind.Trmm: EmitTrmmConfigurationLibrary,
OperationKind.Symm: EmitSymmConfigurationLibrary
}
self.header_template ="""
/*
Generated by manifest.py - Do not edit.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.entry_template = """
//
// Entry point to construct operations
//
void initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(Manifest &manifest) {
"""
self.configuration_prototype_template = "void initialize_${configuration_name}(Manifest &manifest);\n"
self.configuration_template = " initialize_${configuration_name}(manifest);\n"
self.subclass_call_template = " initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(manifest);\n"
self.subclass_prototype_template = "void initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(Manifest &manifest);\n"
self.epilogue_template ="""}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
"""
#
def __enter__(self):
_LOGGER.debug("*** EmitOperationKindLibrary::__enter__")
_LOGGER.debug(f"*** generated_path: {str(self.generated_path)}")
_LOGGER.debug(f"*** OperationKindNames[kind]: {OperationKindNames[self.kind]}")
_LOGGER.debug(f"*** min_cc: {self.min_cc}")
self.operation_path = os.path.join(self.generated_path, OperationKindNames[self.kind], str(self.min_cc))
_LOGGER.debug(f"*** operation_path (directory to make): {str(self.operation_path)}")
os.makedirs(self.operation_path)
self.top_level_path = os.path.join(self.operation_path, f"all_sm{self.min_cc}_{OperationKindNames[self.kind]}_operations.cu")
_LOGGER.debug(f"*** top_level_path (file to write): {str(self.top_level_path)}")
self.top_level_file = open(self.top_level_path, "w")
self.top_level_file.write(self.header_template)
self.source_files = {}
# Each {operation_kind x cc} combination is further decomposed by the instruction
# types used. This dictionary is used to track the file handles for the top-level
# files of each subclass
self.subclass_files = {}
# Configurations in each sub class
self.subclass_configurations = {}
return self
#
def emit(self, configuration_name, operations):
_LOGGER.debug("*** EmitOperationKindLibrary::emit")
_LOGGER.debug(f"*** configuration_name: {configuration_name}")
assert len(operations) > 0
# The extended name for all operations of a given configuration_name is guaranteed
# to be the same because extended_name() is used in defining configuration_name. Thus,
# we can safely use the extended_name() of the first operation.
extended_name = operations[0].extended_name()
_LOGGER.debug('*** extended_name (for all ops): ' + extended_name)
# Create a directory for operations with this subclass if it does not exist
if extended_name not in self.subclass_files:
subclass_path = os.path.join(self.operation_path, extended_name)
_LOGGER.debug(f"*** subclass_path: {str(subclass_path)}")
os.mkdir(subclass_path)
self.subclass_configurations[extended_name] = []
# Open a new top-level file for this sub class
subclass_top_level_path = os.path.join(
subclass_path, f"all_sm{self.min_cc}_{extended_name}_{OperationKindNames[self.kind]}_operations.cu")
_LOGGER.debug('*** subclass_top_level_path (min_cc, extended_name, ' +
'OperationKind): ' + str(subclass_top_level_path))
self.subclass_files[extended_name] = open(subclass_top_level_path, "w")
self.subclass_files[extended_name].write(self.header_template)
self.source_files[extended_name] = [subclass_top_level_path]
subclass_dir = os.path.dirname(self.subclass_files[extended_name].name)
_LOGGER.debug('*** subclass_dir: ' + str(subclass_dir))
with self.emitters[self.kind](subclass_dir, configuration_name) as configuration_emitter:
for operation in operations:
configuration_emitter.emit(operation)
_LOGGER.debug('*** configuration_emitter.configuration_path: ' +
str(configuration_emitter.configuration_path))
self.source_files[extended_name].append(configuration_emitter.configuration_path)
self.subclass_configurations[extended_name].append(configuration_name)
self.subclass_files[extended_name].write(SubstituteTemplate(self.configuration_prototype_template, {'configuration_name': configuration_name} ))
#
def __exit__(self, exception_type, exception_value, traceback):
_LOGGER.debug("*** EmitOperationKindLibrary::__exit__")
for subclass_name, subclass_file in sorted(self.subclass_files.items()):
subclass_cfg = {
'min_cc': str(self.min_cc),
'subclass_name': subclass_name,
'operation_name': OperationKindNames[self.kind]
}
self.top_level_file.write(SubstituteTemplate(self.subclass_prototype_template, subclass_cfg))
self.top_level_file.write(
SubstituteTemplate(self.entry_template, {
'min_cc': str(self.min_cc),
'subclass_name': '',
'operation_name': OperationKindNames[self.kind]
}))
# Finish and close all subclass files
for subclass_name, subclass_file in sorted(self.subclass_files.items()):
subclass_cfg = {
'min_cc': str(self.min_cc),
'subclass_name': subclass_name,
'operation_name': OperationKindNames[self.kind]
}
subclass_file.write(SubstituteTemplate(self.entry_template, subclass_cfg))
for configuration in self.subclass_configurations[subclass_name]:
subclass_file.write(
SubstituteTemplate(self.configuration_template, {
'configuration_name': configuration
}))
subclass_file.write(self.epilogue_template)
subclass_file.close()
# Write the call to initialize_all for this subclass to the top-level file
self.top_level_file.write(SubstituteTemplate(self.subclass_call_template, subclass_cfg))
self.top_level_file.write(self.epilogue_template)
self.top_level_file.close()
class EmitInterfaceLibrary:
"""
Emit the topmost-level CUTLASS library initialization code.
The code is generated in the generated_path directory
(e.g., tools/library/generated in the build directory),
in the initialize_all.cpp file.
That file declares several functions in namespace cutlass::library.
The functions all have this form,
void initialize_all_{operation_kind}_operations(Manifest& manifest);
where {operation_kind} abbreviates the "kind" of operation
(e.g., gemm for matrix-matrix multiply, conv2d for 2-d convolution,
or trmm for triangular solve with multiple right-hand sides).
The definitions of these functions live in subdirectories.
The file also _defines_ the following function in that namespace.
void initialize_all(Manifest& manifest);
That function first prepares the manifest, and then
calls all of the functions declared in this file.
"""
def __init__(self, generated_path, operation_count, args):
self.generated_path = generated_path
self.args = args
self.prototypes = []
self.fn_calls = []
self.operation_count = str(operation_count)
self.top_level_hdr_template = '''
/*
Generated by manifest.py - Do not edit.
*/
'''
self.top_level_prologue = '''
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
namespace cutlass {
\tnamespace library {
${prototypes}
'''
self.top_level_initialize_kind = '''
\t\tvoid initialize_all_${kind}_operations(Manifest &manifest) {
${fn_calls}
\t\t}
'''
self.top_level_initialize = '''
\t\tvoid initialize_all(Manifest &manifest) {
\t\t\tmanifest.reserve(${operation_count});\n
${fn_calls}
\t\t}
'''
self.top_level_suffix = '''
\t} // namespace library
} // namespace cutlass
'''
#
def __enter__(self):
_LOGGER.debug("*** EmitInterfaceLibrary::__enter__")
self.top_level_path = os.path.join(self.generated_path, 'initialize_all.cpp')
_LOGGER.debug("*** top_level_path: " + str(self.top_level_path))
self.top_level_file = open(self.top_level_path, "w")
self.top_level_file.write(self.top_level_hdr_template)
self.source_files = [self.top_level_path,]
return self
#
def emit(self, operation_name):
_LOGGER.debug("*** EmitInterfaceLibrary::emit")
_LOGGER.debug("*** operation_name: " + operation_name)
self.prototypes.append(SubstituteTemplate(
"\t\tvoid initialize_all_${operation_kind}_operations(Manifest &manifest);",
{'operation_kind': operation_name}))
self.fn_calls.append(SubstituteTemplate(
"\t\t\tinitialize_all_${operation_kind}_operations(manifest);",
{'operation_kind': operation_name}))
#
def __exit__(self, exception_type, exception_value, traceback):
_LOGGER.debug("*** EmitInterfaceLibrary::__exit__")
self.top_level_file.write(SubstituteTemplate(self.top_level_prologue, {'prototypes':"\n".join(self.prototypes)}))
# Write out initialize_all method
self.top_level_file.write(SubstituteTemplate(self.top_level_initialize,
{'operation_count': self.operation_count, 'fn_calls':"\n".join(self.fn_calls)}))
self.top_level_file.write(self.top_level_suffix)
self.top_level_file.close()
###################################################################################################
###################################################################################################
class Options:
def __init__(self):
pass
###################################################################################################
#
class Manifest:
#
def __init__(self, args = None):
self.operations = {}
self.args = args
self.operation_count = 0
self.operations_by_name = {}
self.kernel_filter = ''
self.kernel_filter_list = []
self.kernel_names = []
self.operations_enabled = []
self.selected_kernels = []
self.ignore_kernel_names = []
self.compute_capabilities = [50,]
self.curr_build_dir = '.'
self.filter_by_cc = True
if self.args:
self.kernel_filter = self.args.kernels
self.curr_build_dir = args.curr_build_dir
# A common user error is to use commas instead of semicolons.
if ',' in args.architectures:
raise RuntimeError("The list of architectures (CMake option CUTLASS_NVCC_ARCHS) must be semicolon-delimited.\nDon't use commas to separate the architectures; use semicolons.\nYou specified the list as: " + args.architectures)
architectures = args.architectures.split(';') if len(args.architectures) else ['50',]
arch_conditional_cc = ['90a']
architectures = [x if x not in arch_conditional_cc else x.split('a')[0] for x in architectures]
self.compute_capabilities = [int(x) for x in architectures]
if args.filter_by_cc in ['false', 'False', '0']:
self.filter_by_cc = False
if args.operations == 'all':
self.operations_enabled = []
else:
operations_list = [
OperationKind.Gemm
, OperationKind.Conv2d
, OperationKind.Conv3d
, OperationKind.RankK
, OperationKind.Trmm
, OperationKind.Symm
]
self.operations_enabled = [x for x in operations_list if OperationKindNames[x] in args.operations.split(',')]
if args.kernels == 'all':
self.kernel_names = []
else:
self.kernel_names = [x for x in args.kernels.split(',') if x != '']
self.ignore_kernel_names = [x for x in args.ignore_kernels.split(',') if x != '']
if args.kernel_filter_file is None:
self.kernel_filter_list = []
else:
self.kernel_filter_list = self.get_kernel_filters(args.kernel_filter_file)
_LOGGER.debug("Using {filter_count} kernel filters from {filter_file}".format(
filter_count = len(self.kernel_filter_list),
filter_file = args.kernel_filter_file))
self.operation_count = 0
self.operations_by_name = {}
self.disable_full_archs_compilation = args.disable_full_archs_compilation
def get_kernel_filters (self, kernelListFile):
if os.path.isfile(kernelListFile):
with open(kernelListFile, 'r') as fileReader:
lines = [line.rstrip() for line in fileReader if not line.startswith("#")]
lines = [re.compile(line) for line in lines if line]
return lines
else:
return []
#
def filter_out_kernels(self, kernel_name, kernel_filter_list):
for kernel_filter_re in kernel_filter_list:
if kernel_filter_re.search(kernel_name) is not None:
return True
return False
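# Illustrative sketch (comments only, added for this write-up): a kernel filter file is a
# plain-text file with one regular expression per line (lines beginning with '#' are
# skipped), for example the hypothetical patterns
#   cutlass_tensorop_.*gemm.*
#   cutlass_simt_sgemm_128x128_.*
# filter_out_kernels() returns True as soon as any compiled pattern matches the kernel's
# procedural name.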
#
def _filter_string_matches(self, filter_string, haystack):
''' Returns true if all substrings appear in the haystack in order'''
substrings = filter_string.split('*')
for sub in substrings:
idx = haystack.find(sub)
if idx < 0:
return False
haystack = haystack[idx + len(sub):]
return True
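# Illustrative example (comments only, added for this write-up) of the '*' wildcard rule
# above: _filter_string_matches("s16816gemm*128x128", "cutlass_tensorop_s16816gemm_f16_128x128_64x4")
# is True because "s16816gemm" and "128x128" appear in that order, whereas reversing the
# two substrings in the filter string would make it False.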
#
def filter(self, operation):
''' Filtering operations based on various criteria'''
# filter based on compute capability
enabled = not (self.filter_by_cc)
for cc in self.compute_capabilities:
if cc >= operation.tile_description.minimum_compute_capability and \
cc <= operation.tile_description.maximum_compute_capability and \
(cc not in SharedMemPerCC or SharedMemPerCC[cc] >= CalculateSmemUsage(operation)):
enabled = True
break
if not enabled:
return False
if len(self.operations_enabled) and not operation.operation_kind in self.operations_enabled:
return False
# eliminate duplicates
if operation.procedural_name() in self.operations_by_name.keys():
return False
# Filter based on list of valid substrings
if len(self.kernel_names):
name = operation.procedural_name()
enabled = False
# compare against the include list
for name_substr in self.kernel_names:
if self._filter_string_matches(name_substr, name):
_LOGGER.debug("Kernel {kernel} included due to filter string '{filt}'.".format(
kernel = operation.procedural_name(),
filt = name_substr))
enabled = True
break
# compare against the exclude list
for name_substr in self.ignore_kernel_names:
if self._filter_string_matches(name_substr, name):
_LOGGER.debug("Kernel {kernel} ignored due to filter string '{filt}'.".format(
kernel = operation.procedural_name(),
filt = name_substr))
enabled = False
break
if len(self.kernel_filter_list) > 0:
if self.filter_out_kernels(operation.procedural_name(), self.kernel_filter_list):
_LOGGER.debug("Kernel {kernel} matched via kernel filter file.".format(kernel = operation.procedural_name()))
enabled = True
else:
_LOGGER.debug("Kernel {kernel} culled due to no match in kernel filter file.".format(kernel = operation.procedural_name()))
enabled = False
# TODO: filter based on compute data type
return enabled
#
#
def append(self, operation):
    '''
      Inserts the operation into the manifest if it passes the filters.
      Operations are stored as: operation_kind -> min_cc -> configuration_name -> [operations]
    '''
if self.filter(operation):
self.selected_kernels.append(operation.procedural_name())
self.operations_by_name[operation.procedural_name()] = operation
# add the configuration
configuration_name = operation.configuration_name()
# Split operations by minimum CC
min_cc = operation.arch
if operation.operation_kind not in self.operations.keys():
self.operations[operation.operation_kind] = {}
if min_cc not in self.operations[operation.operation_kind]:
self.operations[operation.operation_kind][min_cc] = {}
if configuration_name not in self.operations[operation.operation_kind][min_cc].keys():
self.operations[operation.operation_kind][min_cc][configuration_name] = []
self.operations[operation.operation_kind][min_cc][configuration_name].append(operation)
self.operation_count += 1
else:
_LOGGER.debug("Culled {} from manifest".format(operation.procedural_name()))
#
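  # Sketch of the manifest.cmake produced by emit_manifest_cmake(); the paths shown are
  # hypothetical placeholders for the actual generated file locations:
  #
  #   cutlass_target_sources(cutlass_library_objs PRIVATE
  #     <top_level_path>
  #     <generated>/gemm/all_gemm_operations.cu
  #   )
  #
  #   cutlass_add_cutlass_library(
  #     SUFFIX gemm_sm80_<subclass>
  #     <generated>/gemm/80/<configuration>.cu
  #   )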
def emit_manifest_cmake(self, manifest_path, top_level_path, source_files):
with open(manifest_path, "w") as manifest_file:
target_text = SubstituteTemplate("""cutlass_target_sources(cutlass_library_objs PRIVATE
""", { })
manifest_file.write(target_text + '\n\n')
manifest_file.write(" %s\n" % str(top_level_path.replace('\\', '/')))
generated_path = os.path.join(self.curr_build_dir, 'generated')
for kind in self.operations.keys():
kind_str = OperationKindNames[kind]
all_kind_file = os.path.join(generated_path, kind_str, f"all_{kind_str}_operations.cu").replace('\\', '/')
manifest_file.write(f" {all_kind_file}\n")
manifest_file.write(')\n\n')
for kind in self.operations.keys():
for min_cc in sorted(self.operations[kind].keys()):
for subclass in sorted(source_files[kind][min_cc].keys()):
target_text = SubstituteTemplate("""cutlass_add_cutlass_library(
SUFFIX ${kind}_sm${min_cc}_${subclass}
""", { 'min_cc': str(min_cc), 'kind': OperationKindNames[kind], 'subclass': subclass })
manifest_file.write(target_text + '\n\n')
for source_file in source_files[kind][min_cc][subclass]:
manifest_file.write(" %s\n" % str(source_file.replace('\\', '/')))
manifest_file.write(")\n")
if self.disable_full_archs_compilation:
self.emit_disable_full_archs_compilation(manifest_file, source_files)
  def emit_disable_full_archs_compilation(self, manifest_file, source_files):
    # Classify generated source files by architecture based on MMA-shape substrings in
    # their names. for_hopper is currently a stub and is not consulted by the dispatch below.
    def for_hopper(name):
      pass
def for_ampere(name):
return "16816" in name or \
"16832" in name or \
"16864" in name or \
("1688" in name and "tf32" in name)
def for_turing(name):
return ("1688" in name and "tf32" not in name) or \
"8816" in name
def for_volta(name):
return "884" in name
def is_cpp(name):
return name.endswith(".cpp")
def get_src_archs_str_given_requested_cuda_archs(archs, source_file):
intersected_archs = archs & set(self.compute_capabilities)
if intersected_archs == set():
raise RuntimeError(
"""
Empty archs set for file {} after taking
the intersection of {} (global requested archs) and
{} (per file requested archs)
""".format(source_file, set(self.compute_capabilities), archs))
else:
return " ".join(map(str, intersected_archs))
for min_cc in sorted(source_files.keys()):
for source_file in source_files[min_cc]:
if is_cpp(source_file):
continue # skip because source is cpp
elif for_ampere(source_file):
archs_str = get_src_archs_str_given_requested_cuda_archs({80, 87, 90}, source_file)
elif for_turing(source_file):
archs_str = get_src_archs_str_given_requested_cuda_archs({75}, source_file)
elif for_volta(source_file):
archs_str = get_src_archs_str_given_requested_cuda_archs({70, 72}, source_file)
else:
raise RuntimeError("Per file archs are not set {}, as there is no rule specified for this file pattern".format(source_file))
manifest_file.write("cutlass_apply_cuda_gencode_flags({} SM_ARCHS {})\n".format(str(source_file.replace('\\', '/')), archs_str))
#
def emit(self, target = GeneratorTarget.Library):
operation_emitters = {
GeneratorTarget.Library: EmitOperationKindLibrary
}
# Emitters for all operations that fall under a particular kind (e.g., GEMM, Conv2d)
kind_emitters = {
GeneratorTarget.Library: EmitOperationKindAll
}
interface_emitters = {
GeneratorTarget.Library: EmitInterfaceLibrary
}
generated_path = os.path.join(self.curr_build_dir, 'generated')
# create generated/
if os.path.exists(generated_path):
shutil.rmtree(generated_path)
os.mkdir(generated_path)
with interface_emitters[target](generated_path, self.operation_count, self.args) as iface_emitter:
top_level_path = iface_emitter.top_level_path
for operation_kind in self.operations.keys():
iface_emitter.emit(OperationKindNames[operation_kind])
source_files = {}
for kind in self.operations.keys():
source_files[kind] = {}
for min_cc in self.operations[kind].keys():
source_files[kind][min_cc] = {}
for operation_kind, ops in self.operations.items():
for min_cc, configurations in sorted(ops.items()):
with operation_emitters[target](generated_path, min_cc, operation_kind, self.args) as operation_kind_emitter:
for configuration_name, operations in configurations.items():
_LOGGER.info(f"Emitting {configuration_name} with {len(operations)} operation{'' if len(operations) == 1 else 's'}.")
operation_kind_emitter.emit(configuration_name, operations)
for subclass, files in operation_kind_emitter.source_files.items():
if subclass not in source_files[operation_kind][min_cc]:
source_files[operation_kind][min_cc][subclass] = []
source_files[operation_kind][min_cc][subclass].extend(operation_kind_emitter.source_files[subclass])
# Emit top level all_{gemm, conv2d, ...}_operations.cu files
with kind_emitters[target](generated_path, operation_kind, self.args) as operation_kind_emitter:
operation_kind_emitter.emit(ops)
# write the manifest.cmake file containing paths from all targets
manifest_path = os.path.join(generated_path, "manifest.cmake")
self.emit_manifest_cmake(manifest_path, top_level_path, source_files)
###################################################################################################
| python/cutlass_library/manifest.py/0 | {
"file_path": "python/cutlass_library/manifest.py",
"repo_id": "python",
"token_count": 11132
} | 43 |
/* Copy buttons */
button.copybtn {
position: absolute;
display: flex;
top: .3em;
right: .3em;
width: 1.7em;
height: 1.7em;
opacity: 0;
transition: opacity 0.3s, border .3s, background-color .3s;
user-select: none;
padding: 0;
border: none;
outline: none;
border-radius: 0.4em;
/* The colors that GitHub uses */
border: #1b1f2426 1px solid;
background-color: #f6f8fa;
color: #57606a;
}
button.copybtn.success {
border-color: #22863a;
color: #22863a;
}
button.copybtn svg {
stroke: currentColor;
width: 1.5em;
height: 1.5em;
padding: 0.1em;
}
div.highlight {
position: relative;
}
/* Show the copybutton */
.highlight:hover button.copybtn, button.copybtn.success {
opacity: 1;
}
.highlight button.copybtn:hover {
background-color: rgb(235, 235, 235);
}
.highlight button.copybtn:active {
background-color: rgb(187, 187, 187);
}
/**
* A minimal CSS-only tooltip copied from:
* https://codepen.io/mildrenben/pen/rVBrpK
*
* To use, write HTML like the following:
*
* <p class="o-tooltip--left" data-tooltip="Hey">Short</p>
*/
.o-tooltip--left {
position: relative;
}
.o-tooltip--left:after {
opacity: 0;
visibility: hidden;
position: absolute;
content: attr(data-tooltip);
padding: .2em;
font-size: .8em;
left: -.2em;
background: grey;
color: white;
white-space: nowrap;
z-index: 2;
border-radius: 2px;
transform: translateX(-102%) translateY(0);
transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1);
}
.o-tooltip--left:hover:after {
display: block;
opacity: 1;
visibility: visible;
transform: translateX(-100%) translateY(0);
transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1);
transition-delay: .5s;
}
/* By default the copy button shouldn't show up when printing a page */
@media print {
button.copybtn {
display: none;
}
}
| python/docs/_static/copybutton.css/0 | {
"file_path": "python/docs/_static/copybutton.css",
"repo_id": "python",
"token_count": 880
} | 44 |
{
"path": "./../../../../examples/python/00_basic_gemm.ipynb"
}
| python/docs_src/source/externals/00_basic_gemm.nblink/0 | {
"file_path": "python/docs_src/source/externals/00_basic_gemm.nblink",
"repo_id": "python",
"token_count": 31
} | 45 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for defining Conv2D problem sizes for testing.
This file was ported from the C++ version in test/unit/conv/device/conv2d_problems.h
"""
from cutlass_library import ConvMode
import cutlass
from cutlass.shape import Conv2DProblemSize
class TestbedConv2dProblemSizes:
def __init__(self, minimum_channel_size: int):
conv2d_default_sizes = self.initialize_conv2d_default_sizes(minimum_channel_size)
conv2d_rigorous_sizes = self.initialize_conv2d_rigorous_sizes(minimum_channel_size)
conv2d_resnet50_sizes = self.initialize_conv2d_resnet50_sizes(1)
conv2d_resnet50_sizes_perf = self.initialize_conv2d_resnet50_sizes(34)
grouped_sizes = self.initialize_conv2d_grouped_sizes()
# Filter all problems
self.all = []
for size_list in [conv2d_default_sizes, conv2d_rigorous_sizes, conv2d_resnet50_sizes, conv2d_resnet50_sizes_perf, grouped_sizes]:
for size in size_list:
if (size.C // size.groups) % minimum_channel_size == 0:
self.all.append(size)
def initialize_conv2d_default_sizes(self, minimum_channel_size):
# Small input size x stride (1,1)
# C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
conv2d_default_sizes = []
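        # Each Conv2DProblemSize below is constructed positionally; the arguments appear
        # to follow the C++ Conv2dProblemSize convention this file was ported from:
        #   N, H, W, C            (input activation size)
        #   K, R, S, C            (filter size)
        #   pad_h, pad_w
        #   stride_h, stride_w
        #   dilation_h, dilation_w
        # with optional trailing mode, split-k slice, and group arguments (see the grouped
        # sizes at the end of this file).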
conv2d_default_sizes.append(Conv2DProblemSize(
1, 1, 1, minimum_channel_size,
8, 1, 1, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 1, 8, minimum_channel_size,
8, 1, 3, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 7, 8, minimum_channel_size,
8, 3, 3, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 7, 9, minimum_channel_size,
8, 4, 4, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
2, 7, 9, minimum_channel_size,
8, 5, 5, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
3, 7, 9, minimum_channel_size,
8, 6, 5, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
3, 7, 9, minimum_channel_size,
8, 6, 6, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
3, 7, 9, minimum_channel_size,
8, 7, 7, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
##############################################
# Small input size x stride (2,2)
# C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
##############################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 11, 7, minimum_channel_size,
8, 1, 1, minimum_channel_size,
0, 0,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 11, 7, minimum_channel_size,
8, 3, 3, minimum_channel_size,
1, 1,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 13, 11, minimum_channel_size,
8, 1, 1, minimum_channel_size,
1, 1,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 17, 19, minimum_channel_size,
16, 2, 2, minimum_channel_size,
1, 1,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 23, 5, minimum_channel_size,
16, 3, 3, minimum_channel_size,
1, 1,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 13, 17, 8,
24, 3, 3, 8,
0, 0,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 23, 21, 8,
24, 3, 3, 8,
1, 1,
3, 3,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 20, 24, 8,
40, 3, 3, 8,
3, 3,
3, 3,
1, 1,
))
##########################################
# Medium input size (1x16x16x128), filter size (1x1, 2x2, 3x3, 5x5), stride (1, 1)
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 15, 19, 160,
224, 1, 1, 160,
0, 0,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 19, 37, 160,
224, 3, 3, 160,
1, 1,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 16, 16, 160,
224, 2, 3, 160,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 23, 21, 128,
224, 3, 3, 128,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 29, 37, 160,
224, 5, 5, 160,
2, 2,
1, 1,
1, 1,
))
##########################################
# C > CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 15, 19, 32 + minimum_channel_size,
96, 3, 3, 32 + minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 16, 24, 64 + minimum_channel_size,
96, 3, 3, 64 + minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
##########################################
        # Medium input size, filter size (1x1, 3x3, 5x5, 7x7), stride (2, 2)
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 13, 16, 288,
160, 5, 5, 288,
2, 2,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 55, 51, 256,
512, 1, 1, 256,
0, 0,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 71, 80, 32,
64, 5, 5, 32,
2, 2,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 224, 224, 8,
64, 7, 7, 8,
3, 3,
2, 2,
1, 1,
))
##########################################
# Medium input size stride (3, 3), filter (3, 3), non-default padding
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 27, 23, 256,
512, 3, 3, 256,
0, 0,
3, 3,
1, 1,
))
##########################################
# Medium input size padding > stride, asymmetric filter, padding and striding
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 27, 31, 256,
512, 3, 3, 256,
5, 7,
3, 4,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 27, 35, 256,
512, 7, 5, 256,
11, 7,
3, 5,
1, 1,
))
##########################################
# Medium input size *mixed* stride (1, 2) and (2, 1),
# filter (3, 3), default padding
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 27, 27, 256,
512, 3, 3, 256,
1, 1,
1, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 27, 27, 256,
512, 3, 3, 256,
1, 1,
2, 1,
1, 1,
))
        ##########################################
        # Additional input size
        ##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
3, 28, 28, 256,
256, 2, 2, 256,
0, 0,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 32, 32, 16,
32, 3, 3, 16,
1, 1,
6, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
32, 24, 32, 32,
32, 1, 2, 32,
0, 0,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
4, 2, 3, 256,
328, 3, 5, 256,
1, 1,
1, 1,
1, 1,
))
return conv2d_default_sizes
# Add a few large and rigorous convolution problem sizes
def initialize_conv2d_rigorous_sizes(self, minimum_channel_size):
sizes = []
        if False:  # disabled by default; enable locally to also exercise these large, rigorous sizes
sizes.append(Conv2DProblemSize.from_sizes(
(1, 124, 224, 2 * minimum_channel_size),
(24, 7, 7, 2 * minimum_channel_size),
))
sizes.append(Conv2DProblemSize.from_sizes(
(1, 233, 35, minimum_channel_size),
(24, 7, 5, minimum_channel_size),
))
return sizes
    # Add resnet50 layers to the unit test sizes
def initialize_conv2d_resnet50_sizes(self, batch_size):
conv2d_problem_vector = []
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 64,
256, 1, 1, 64,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 64,
64, 1, 1, 64,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 64,
64, 3, 3, 64,
1, 1,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 256,
64, 1, 1, 256,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 256,
512, 1, 1, 256,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 256,
128, 1, 1, 256,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 28, 28, 128,
128, 3, 3, 128,
1, 1,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 28, 28, 128,
512, 1, 1, 128,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 28, 28, 512,
128, 1, 1, 512,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 28, 28, 512,
1024, 1, 1, 512,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 28, 28, 512,
256, 1, 1, 512,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 14, 14, 256,
256, 3, 3, 256,
1, 1,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 14, 14, 256,
1024, 1, 1, 256,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 14, 14, 1024,
256, 1, 1, 1024,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 14, 14, 1024,
2048, 1, 1, 1024,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 14, 14, 1024,
512, 1, 1, 1024,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 7, 7, 512,
512, 3, 3, 512,
1, 1,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 7, 7, 512,
2048, 1, 1, 512,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 7, 7, 2048,
512, 1, 1, 2048,
0, 0,
1, 1,
1, 1,
))
return conv2d_problem_vector
def initialize_conv2d_grouped_sizes(self):
threadblock_n = 128
threadblock_k = 32
sizes = []
##########################################
# One group calculated by one or multiple CTAs: k_per_group % CTA::N = 0
# One CTA calculates a single group
##########################################
for cta_per_group_k in range(1, 4):
for groups in range(2, 5):
conv_k = cta_per_group_k * threadblock_n * groups
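                # Worked example for the first iteration (cta_per_group_k=1, groups=2):
                # conv_k = 1 * 128 * 2 = 256 total output channels, i.e. 128 per group, so each
                # group's GEMM-N extent is exactly one threadblock_n tile; the input has
                # C = 32 * 2 * 2 = 128 channels, i.e. 64 (= 2 * threadblock_k) channels per group.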
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k * 2 * groups,
conv_k, 3, 3, threadblock_k * 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
groups
))
# Partial gemm_k: k_per_group == CTA::N && channels_per_group < CTA::K
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k,
threadblock_n * 2, 3, 3, threadblock_k // 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
2
))
sizes.append(Conv2DProblemSize(
1, 56, 56, 696,
768, 3, 3, 232,
1, 1,
2, 2,
1, 1,
ConvMode.CrossCorrelation,
1,
3
))
sizes.append(Conv2DProblemSize(
1, 14, 14, 1392,
1536, 3, 3, 232,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
3
))
##########################################
        # One CTA calculates multiple groups: CTA::N % k_per_group = 0
##########################################
# 2 groups per CTA
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k * 4,
threadblock_n, 3, 3, threadblock_k * 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
2
))
# 2 groups per CTA and partial gemm_k
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k,
threadblock_n, 3, 3, threadblock_k // 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
2
))
# 4 groups per CTA
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k * 8,
threadblock_n // 2, 3, 3, threadblock_k * 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
4
))
# 4 groups per CTA and partial gemm_k
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k * 2,
threadblock_n // 2, 3, 3, threadblock_k // 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
4
))
return sizes
| test/python/cutlass/conv2d/conv2d_problem_sizes.py/0 | {
"file_path": "test/python/cutlass/conv2d/conv2d_problem_sizes.py",
"repo_id": "test",
"token_count": 9922
} | 46 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM for fused epilogue broadcast testbed
    Parallel split-k is not tested because the regular conv kernel can be used
    whenever parallel split-k is needed; the broadcast can then happen in the
    reduction kernel.
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "conv2d_problems.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/core_io.h"
#include "cutlass/util/tensor_view_io.h"
#include "../cache_testbed_output.h"
namespace test {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Conv2d>
struct Conv2dWithBroadcastReferenceOp {
using OutputOp = typename Conv2d::EpilogueOutputOp;
using ElementCompute = typename OutputOp::ElementCompute;
using ElementZ = typename OutputOp::ElementZ;
using ElementT = typename OutputOp::ElementT;
typename OutputOp::BinaryOp binary_op;
typename OutputOp::ElementwiseOp elementwise_op;
Conv2dWithBroadcastReferenceOp() { }
void operator()(ElementZ &Z, ElementT &T, ElementCompute conv2d, ElementCompute bias) {
ElementCompute t_full = binary_op(conv2d, bias);
T = ElementT(t_full);
ElementCompute z_full = elementwise_op(t_full);
Z = ElementZ(z_full);
}
};
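// For example, with the common choice of a plus binary op and a ReLU elementwise op
// (illustrative of a typical EpilogueOutputOp configuration, not a requirement of this
// testbed), the reference computes
//   T = conv2d + bias;  Z = max(T, 0);
// matching the fused formulas described below.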
/////////////////////////////////////////////////////////////////////////////////////////////////
// Fused testbed
//
// Y = CONV(AB, C)
//
// T[n, p, q, k] = ReductionOp(Y[n, p, q, k], Broadcast[k])
//
// Z[n, p, q, k] = Elementwise(T[n, p, q, k])
//
template <
typename Conv2d,
typename ReferenceOp,
bool AddBroadcastFirst = false
>
class TestbedConv2dWithBroadcast {
public:
using ElementA = typename Conv2d::ElementA;
using LayoutA = typename Conv2d::LayoutA;
using ElementB = typename Conv2d::ElementB;
using LayoutB = typename Conv2d::LayoutB;
using ElementC = typename Conv2d::ElementC;
using LayoutC = typename Conv2d::LayoutC;
using ElementAccumulator = typename Conv2d::ElementAccumulator;
using ElementCompute = typename Conv2d::ElementCompute;
using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp;
using ElementZ = typename EpilogueOutputOp::ElementZ;
using ElementT = typename EpilogueOutputOp::ElementT;
using ElementVector = typename EpilogueOutputOp::ElementVector;
static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator;
static const bool kAddBroadcastFirst = AddBroadcastFirst;
static const bool kStoreT = EpilogueOutputOp::kStoreT;
public:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementAccumulator, LayoutC> tensor_C_reference;
cutlass::HostTensor<ElementZ, LayoutC> tensor_Z_computed;
cutlass::HostTensor<ElementZ, LayoutC> tensor_Z_reference;
cutlass::HostTensor<ElementT, LayoutC> tensor_T_computed;
cutlass::HostTensor<ElementT, LayoutC> tensor_T_reference;
cutlass::HostTensor<ElementAccumulator, LayoutC> tensor_Y_reference;
cutlass::HostTensor<ElementVector, LayoutC> tensor_Broadcast; // Input Broadcast
public:
TestbedConv2dWithBroadcast(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
int scope;
int bits = cutlass::sizeof_bits<Element>::value;
if (bits <= 8) {
scope = 2;
}
else if (bits == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope = 3;
}
else {
scope = 5;
}
}
else {
scope = 8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope, -scope, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
}
else {
}
}
void initialize(
cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) {
tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_C_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_Z_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_Z_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_T_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_T_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_Y_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_Broadcast.resize({
1,
1,
1,
implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size).c(),
});
initialize_tensor(tensor_A.host_view(), init_A, seed);
initialize_tensor(tensor_B.host_view(), init_B, seed * 17);
initialize_tensor(tensor_C.host_view(), init_C, seed * 39);
initialize_tensor(tensor_Broadcast.host_view(), init_C, seed * 39);
for (int n = 0; n < tensor_C_reference.extent().n(); ++n) {
for (int p = 0; p < tensor_C_reference.extent().h(); ++p) {
for (int q = 0; q < tensor_C_reference.extent().w(); ++q) {
for (int k = 0; k < tensor_C_reference.extent().c(); ++k) {
tensor_C_reference.at({n, p, q, k}) = ElementAccumulator(tensor_C.at({n, p, q, k}));
}
}
}
}
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_Broadcast.sync_device();
tensor_C_reference.sync_device();
tensor_Z_computed.sync_device();
tensor_Z_reference.sync_device();
tensor_T_computed.sync_device();
tensor_T_reference.sync_device();
tensor_Y_reference.sync_device();
}
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Conv2d::UnderlyingKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size,
cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(1)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
#if 0 //display conv2d problem size for debugging
std::cout << problem_size << std::endl
<< "alpha, beta: (" << alpha << ", " << beta << ")" << std::endl
<< "split_k_mode: " << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? "(serial)" : "(parallel)") << std::endl
<< std::endl;
#endif
initialize(problem_size);
// configure the operator
Conv2d conv2d_op;
typename Conv2d::Arguments conv2d_args(
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_Z_computed.device_ref(),
{alpha, beta},
split_k_mode,
tensor_Broadcast.device_data(),
kStoreT ? tensor_T_computed.device_data() : nullptr,
0, // This must be zero
implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size).c()
);
// initialize the kernel
size_t workspace_size = Conv2d::get_workspace_size(conv2d_args);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = conv2d_op.initialize(conv2d_args, workspace.get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
// run conv2d operator
status = conv2d_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
bool passed = false;
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " device reference error: "
<< cudaGetErrorString(result);
tensor_T_computed.sync_host();
tensor_Z_computed.sync_host();
//
// Reference check
//
// When kAddBroadcastFirst is true, add bias on the host
ElementCompute beta_ref = kAddBroadcastFirst ? ElementCompute(0) : beta;
#if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED
cutlass::reference::device::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementAccumulator,
LayoutC,
ElementAccumulator,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C_reference.device_ref(),
tensor_Y_reference.device_ref(),
alpha,
beta_ref);
// sync host (copy device data to host) for dumping error output in case of mismatches
tensor_Y_reference.sync_host();
#else
cutlass::reference::host::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementAccumulator,
LayoutC,
ElementAccumulator,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.host_ref(),
tensor_B.host_ref(),
tensor_C_reference.host_ref(),
tensor_Y_reference.host_ref(),
alpha,
beta_ref);
#endif
ReferenceOp reference_op;
// compute tensor Z and tensor T
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < (kConvolutionalOperator == cutlass::conv::Operator::kFprop ? problem_size.P : problem_size.H); ++p) {
for (int q = 0; q < (kConvolutionalOperator == cutlass::conv::Operator::kFprop ? problem_size.Q : problem_size.W); ++q) {
for (int k = 0; k < (kConvolutionalOperator == cutlass::conv::Operator::kFprop ? problem_size.K : problem_size.C); ++k) {
ElementZ z{};
ElementT t{};
ElementCompute accum = tensor_Y_reference.at({n, p, q, k});
ElementCompute bias = ElementCompute(tensor_Broadcast.at({0, 0, 0, k}));
if (kAddBroadcastFirst) {
reference_op(z, t, accum + bias,
beta * ElementCompute(tensor_C_reference.at({n, p, q, k})));
} else {
reference_op(z, t, accum, bias);
}
tensor_Z_reference.at({n, p, q, k}) = z;
tensor_T_reference.at({n, p, q, k}) = t;
}
}
}
}
if (kStoreT) {
passed = cutlass::reference::host::TensorEquals(
tensor_T_computed.host_view(),
tensor_T_reference.host_view());
EXPECT_TRUE(passed);
}
passed = cutlass::reference::host::TensorEquals(
tensor_Z_computed.host_view(),
tensor_Z_reference.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Conv2d_ImplicitGemm_device_"
<< (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_")
<< (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDeconv ? "deconv_" : "wgrad_")))
<< "nhwc_"
<< problem_size.N << "x"
<< problem_size.H << "x"
<< problem_size.W << "x"
<< problem_size.C
<< "_krsc_"
<< problem_size.K << "x"
<< problem_size.R << "x"
<< problem_size.S << "x"
<< problem_size.C
<< "_padding_"
<< problem_size.pad_h << "x"
<< problem_size.pad_w
<< "_stride_"
<< problem_size.stride_h << "x"
<< problem_size.stride_w
<< "_dilation_"
<< problem_size.dilation_h << "x"
<< problem_size.dilation_w << "_"
<< (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? "xcorr_" : "conv_")
<< Conv2d::ThreadblockShape::kM << "x"
<< Conv2d::ThreadblockShape::kN << "x"
<< Conv2d::ThreadblockShape::kK << "_"
<< Conv2d::WarpShape::kM << "x"
<< Conv2d::WarpShape::kN << "x"
<< Conv2d::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n"
<< "\nBroadcast:\n" << tensor_Broadcast.host_view() << "\n"
<< "\nY reference:\n" << tensor_Y_reference.host_view() << "\n"
<< "\nT reference:\n" << tensor_T_reference.host_view() << "\n"
<< "\nT computed:\n" << tensor_T_computed.host_view() << "\n"
<< "\nZ reference:\n" << tensor_Z_reference.host_view() << "\n"
<< "\nZ computed:\n" << tensor_Z_computed.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ImplicitGemm,
typename ReferenceOp = Conv2dWithBroadcastReferenceOp<ImplicitGemm>,
bool AddBroadcastFirst = false>
bool TestSpecificConv2dWithBroadcast(
const Conv2dProblemVector & problem_sizes) {
bool passed = true;
//
// Testbed object
//
TestbedConv2dWithBroadcast<ImplicitGemm, ReferenceOp, AddBroadcastFirst> testbed;
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for(auto conv_problem : problem_sizes) {
//
// Test
//
// test mode = xcross
passed = testbed.run(
conv_problem,
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
// test mode = convolution
passed = testbed.run(
conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// TestAllConv: Runs cutlass::conv::device::ImplicitGemmConvolution operator and compares it with reference
// TestAllConv runs conv operator on default conv problem sizes from test::conv::device::TestbedConv2dProblemSizes
// Additionally, each conv2d test can provide conv problem sizes (conv_test_sizes) and blacklist of sizes
// (conv_blacklist_sizes)
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
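//
// Typical usage from a unit test (the alias below is a hypothetical placeholder for a
// concrete device-level ImplicitGemmConvolution instantiation with a broadcast epilogue):
//
//   using ImplicitGemm = /* cutlass::conv::device::ImplicitGemmConvolution<...> */;
//   EXPECT_TRUE((test::conv::device::TestAllConv2dWithBroadcast<ImplicitGemm>()));
//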
template <typename ImplicitGemm,
typename ReferenceOp = Conv2dWithBroadcastReferenceOp<ImplicitGemm>,
bool AddBroadcastFirst = false,
bool TestSplitK = true
>
bool TestAllConv2dWithBroadcast(
const Conv2dProblemVector &conv_test_sizes = Conv2dProblemVector(),
const Conv2dProblemVector &conv_blacklist_sizes = Conv2dProblemVector()) {
bool passed = true;
//
// Testbed object
//
TestbedConv2dWithBroadcast<ImplicitGemm, ReferenceOp, AddBroadcastFirst> testbed;
//
// Get conv problem sizes to run conv operator
//
TestbedConv2dProblemSizes conv_problems(128/cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value);
// Vector of conv2d problem sizes to avoid duplicate runs
Conv2dProblemVector conv_tested_sizes;
Conv2dProblemVector const *problem_vectors[] = {
&conv_test_sizes, // run user specified sizes
&conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes
&conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
&conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled
#endif
};
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for (Conv2dProblemVector const * problem_vector : problem_vectors) {
// Run conv testbed on default convolution sizes
for(auto conv_problem : *problem_vector) {
// Skip blacklist and avoid duplicate problem sizes
if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() ||
std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) {
continue;
}
//
// Procedurally disable certain cases
//
      // CUTLASS DGRAD's *unity* stride specialization only supports stride {1, 1}
if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kUnity)) {
if (!((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
#if 0 // relax restrictions on analytic strided dgrad
      // CUTLASS DGRAD's *strided* specialization only supports stride >= {2, 2}
if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kStrided)) {
if (((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
#endif
//
// Test
//
// push back tested problem size to avoid re-running duplicates
conv_tested_sizes.push_back(conv_problem);
// test mode = xcross
passed = testbed.run(
conv_problem,
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
// test mode = convolution
passed = testbed.run(
conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
}
}
// CUTLASS DGRAD's *strided* specialization does not support split-k mode
if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kStrided)) {
passed = testbed.run(
cutlass::conv::Conv2dProblemSize(
{1, 56, 56, 8}, // input size (NHWC)
{8, 1, 1, 8}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1}), // dilation (dilation_h, dilation_w)
cutlass::conv::SplitKMode::kSerial,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0));
if (!passed) {
return false;
}
return passed;
}
if (!TestSplitK)
return passed;
  // Sweep split-k-slice using serial and parallel reduction with non-unity alpha and non-zero beta for
  // a single conv2d problem size. Convolution unit tests take a long time to run so only sweep parameters
  // which are absolutely necessary to catch functional bugs. The code below does provide the option to sweep
  // alpha and beta for local testing, but only runs one value for alpha and beta.
cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
{1, 17, 11, 288}, // input size (NHWC)
{160, 3, 3, 288}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
);
cutlass::conv::SplitKMode split_k_modes [] = {
cutlass::conv::SplitKMode::kSerial
};
int split_k_slices[] = {
1, 2, 3, 4, 201
};
double problem_alpha[] = {
2.0
};
double problem_beta[] = {
2.0
};
for (auto split_k_mode : split_k_modes) {
for (auto split_k_slice : split_k_slices) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
passed = testbed.run(
conv2d_split_k_test_size.reset_split_k_slices(split_k_slice),
split_k_mode,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(alpha),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(beta));
if (!passed) {
return false;
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
| test/unit/conv/device/conv2d_with_broadcast_testbed.h/0 | {
"file_path": "test/unit/conv/device/conv2d_with_broadcast_testbed.h",
"repo_id": "test",
"token_count": 10107
} | 47 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM testbed for 3.x API
*/
#pragma once
#include "cutlass/cutlass.h"
#include "../../common/cutlass_unit_test.h"
#include "cute/tensor.hpp"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/convnd_problem_shape.hpp"
#include "thrust/universal_vector.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/reference/host/conv.hpp"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "conv_problem_sizes.hpp"
#include "../cache_testbed_output.h"
#include <iostream>
#include "cute/layout.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test::conv::device {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Initializes a flat device buffer
template <typename Element>
static void
initialize_values(
thrust::universal_vector<Element>& dst_ptr,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (cutlass::Distribution::Uniform == dist_kind) {
int scope;
int bits = cutlass::sizeof_bits<Element>::value;
if (bits <= 8) {
scope = 2;
}
else if (bits == 16) {
scope = 4;
}
else {
scope = 8;
}
cutlass::reference::host::BlockFillRandomUniform(
dst_ptr.data().get(), dst_ptr.size(), seed, scope, -scope, 0);
}
else if (cutlass::Distribution::Identity == dist_kind) {
cutlass::reference::host::BlockFillRandomUniform(
dst_ptr.data().get(), dst_ptr.size(), seed, 0, 0, 0);
}
else if (cutlass::Distribution::Gaussian == dist_kind) {
cutlass::reference::host::BlockFillRandomGaussian(dst_ptr.data().get(), dst_ptr.size(), seed, 0, 0.5);
}
else if (cutlass::Distribution::Sequential == dist_kind) {
cutlass::reference::host::BlockFillSequential(dst_ptr.data().get(), dst_ptr.size());
}
else {
std::cerr << "Invalid distribution kind!\n.";
exit(1);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <class Conv>
struct ConvTestbed {
// Kernel data types
using ElementA = typename Conv::ConvKernel::ElementA;
using ElementB = typename Conv::ConvKernel::ElementB;
using ElementC = cute::conditional_t<cute::is_void_v<typename Conv::ConvKernel::ElementC>,
typename Conv::ConvKernel::ElementD, typename Conv::ConvKernel::ElementC>;
using ElementD = typename Conv::ConvKernel::ElementD;
using ElementAccumulator = typename Conv::ConvKernel::ElementAccumulator;
//
// FusionOperation derived types/queries
//
using FusionOp = typename Conv::EpilogueOutputOp;
// fusion types are potentially void if the fusion is not supported
// helper so we don't try to construct HostTensor with void type
template <typename T, typename U = uint8_t>
using non_void_t = cute::conditional_t<cute::is_void_v<T>, U, T>;
using ElementScalar = typename FusionOp::ElementScalar;
using ElementCompute = typename FusionOp::ElementCompute;
using BiasType = typename cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithBias<FusionOp>::type;
using ElementBias = non_void_t<BiasType>;
using ActivationType = non_void_t<typename cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithActivation<FusionOp>::type,
cutlass::epilogue::thread::Identity<ElementCompute>>;
static constexpr bool IsActivationEnabled = cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithActivation<FusionOp>::value;
using ActivationFunctor = cute::conditional_t<IsActivationEnabled, ActivationType, cutlass::epilogue::thread::Identity<ElementCompute>>;
static constexpr bool IsBiasEnabled = cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithBias<FusionOp>::value &&
!cute::is_same_v<BiasType, void>;
using StrideC = typename Conv::ConvKernel::StrideC;
using StrideD = typename Conv::ConvKernel::StrideD;
using ThreadEpilogueOp = typename Conv::ConvKernel::CollectiveEpilogue::ThreadEpilogueOp;
static constexpr cutlass::conv::Operator ConvOp = Conv::DispatchPolicy::ConvOp;
static constexpr int NumSpatialDimensions = Conv::NumSpatialDimensions;
using ProblemShape = cutlass::conv::ConvProblemShape<ConvOp, NumSpatialDimensions>;
using Schedule = typename Conv::DispatchPolicy::Schedule;
/// Initialization
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform;
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform;
cutlass::Distribution::Kind init_C = cutlass::Distribution::Uniform;
cutlass::Distribution::Kind init_bias = cutlass::Distribution::Uniform;
uint64_t seed = 6090;
float epsilon = 0.0f;
int split_p_slices = 1;
thrust::universal_vector<ElementA> tensor_A;
thrust::universal_vector<ElementB> tensor_B;
thrust::universal_vector<ElementC> tensor_C;
thrust::universal_vector<ElementD> tensor_D_computed;
thrust::universal_vector<ElementD> tensor_D_reference;
thrust::universal_vector<ElementBias> tensor_bias;
thrust::universal_vector<ElementScalar> tensor_alpha;
thrust::universal_vector<ElementScalar> tensor_beta;
void initialize(ProblemShape const& problem_shape, uint64_t seed = 6090) {
tensor_A.resize(sizeof(ElementA) * problem_shape.size_A());
tensor_B.resize(sizeof(ElementB) * problem_shape.size_B());
tensor_C.resize(sizeof(ElementC) * problem_shape.size_C());
tensor_D_computed.resize(sizeof(ElementD) * problem_shape.size_C());
tensor_D_reference.resize(sizeof(ElementD) * problem_shape.size_C());
tensor_bias.resize(sizeof(ElementBias) * cute::size(cute::get<0>(problem_shape.get_shape_B())));
initialize_values(tensor_A, init_A, seed);
initialize_values(tensor_B, init_B, seed * 11);
initialize_values(tensor_C, init_C, seed * 17);
initialize_values(tensor_bias, init_bias, seed * 19);
}
// Determine SMEM requirements and waive if not satisfied
bool sufficient() const {
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
int max_smem_size;
result = cudaDeviceGetAttribute(&max_smem_size, cudaDevAttrMaxSharedMemoryPerBlockOptin, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaDeviceGetAttribute() failed");
}
return max_smem_size >= Conv::ConvKernel::SharedStorageSize;
}
/// Executes one test
bool run(
ProblemShape const& problem_shape,
ElementScalar alpha = ElementScalar(1),
ElementScalar beta = ElementScalar(0)
) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device.\n";
}
return true;
}
initialize(problem_shape);
cutlass::KernelHardwareInfo hw_info;
cudaGetDevice(&hw_info.device_id);
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
// configure the operator
Conv conv_op;
auto stride_C = StrideC{};
auto stride_D = StrideD{};
if constexpr (ConvOp == cutlass::conv::Operator::kWgrad) {
stride_C = cutlass::make_cute_packed_stride(
StrideC{}, problem_shape.shape_C, problem_shape.stride_C, ConvOp);
stride_D = cutlass::make_cute_packed_stride(
StrideD{}, problem_shape.shape_C, problem_shape.stride_C, ConvOp);
}
// Need to support non-packed output strides for fprop and dgrad kernel.
else {
cute::for_each(cute::make_seq<cute::rank<0>(StrideC{})>{}, [&](auto i) {
cute::get<0, i>(stride_C) = problem_shape.stride_C[ProblemShape::RankT-2-i];
});
cute::for_each(cute::make_seq<cute::rank<0>(StrideD{})>{}, [&](auto i) {
cute::get<0, i>(stride_D) = problem_shape.stride_C[ProblemShape::RankT-2-i];
});
}
typename Conv::ConvKernel::TileScheduler::Arguments scheduler_args{};
auto args = typename Conv::Arguments {
{
problem_shape,
tensor_A.data().get(),
tensor_B.data().get(),
}, // MainloopArguments
{
{},
tensor_C.data().get(),
stride_C,
tensor_D_computed.data().get(),
stride_D,
}, // EpilogueArguments
hw_info,
scheduler_args
};
auto &fusion_args = args.epilogue.thread;
fusion_args.alpha = alpha;
fusion_args.beta = beta;
if constexpr (IsBiasEnabled) {
fusion_args.bias_ptr = tensor_bias.data().get();
}
// Clamp bound
if constexpr (cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::Clamp<ElementCompute>>) {
fusion_args.activation.lower_bound = CUTLASS_STL_NAMESPACE::numeric_limits<ElementCompute>::lowest();
fusion_args.activation.upper_bound = CUTLASS_STL_NAMESPACE::numeric_limits<ElementCompute>::max();
}
// Scale
if constexpr (cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::ScaledGELU_taylor<ElementCompute>> ||
cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::ScaledGELU<ElementCompute>>) {
fusion_args.activation.scale = ElementCompute{1};
}
cutlass::Status status = cutlass::Status::kInvalid;
status = conv_op.can_implement(args);
EXPECT_EQ(conv_op.can_implement(args), cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
std::cerr << "can_implement failed for the given problem_shape: \n";
print(problem_shape);
return false;
}
// find workspace requirement for parallel split-k reduction
size_t workspace_size = Conv::get_workspace_size(args);
thrust::universal_vector<uint8_t> workspace(workspace_size);
status = conv_op.initialize(args, workspace.data().get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
    // run the conv operator
status = conv_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
bool passed = false;
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " Kernel execution error: "
<< cudaGetErrorString(result);
// Create cute::Tensors using the logical rank-3 MNK multi-mode shapes the mainloop gives us
auto shape_mA = cute::reverse(problem_shape.shape_A);
auto shape_mB = cute::reverse(problem_shape.shape_B);
auto shape_mC = cute::reverse(problem_shape.shape_C);
auto shape_mBias = cute::make_shape(cute::size(cute::get<0>(problem_shape.get_shape_B())));
auto stride_mA = cute::reverse(problem_shape.stride_A);
auto stride_mB = cute::reverse(problem_shape.stride_B);
auto stride_mC = cute::reverse(problem_shape.stride_C);
auto mA = make_tensor(tensor_A.data().get(), make_layout(shape_mA, stride_mA));
auto mB = make_tensor(tensor_B.data().get(), make_layout(shape_mB, stride_mB));
auto mC = make_tensor(tensor_C.data().get(), make_layout(shape_mC, stride_mC));
auto mD_ref = make_tensor(tensor_D_reference.data().get(), make_layout(shape_mC, stride_mC));
auto mD_computed = make_tensor(tensor_D_computed.data().get(), make_layout(shape_mC, stride_mC));
auto mBias = make_tensor(tensor_bias.data().get(), make_layout(shape_mBias));
auto mAlpha = make_tensor(tensor_alpha.data().get(), make_layout(shape_mBias));
auto mBeta = make_tensor(tensor_beta.data().get(), make_layout(shape_mBias));
cutlass::reference::host::ConvEpilogueFusionParams<
ElementAccumulator,
ElementScalar,
ElementCompute,
ElementC,
ElementD,
decltype(mAlpha),
decltype(mBeta),
decltype(mBias),
ActivationFunctor>
epilogue_fusion_params{};
epilogue_fusion_params.alpha = alpha;
epilogue_fusion_params.beta = beta;
if constexpr (IsBiasEnabled) {
epilogue_fusion_params.tensor_bias = mBias;
}
auto padding = cute::reverse(problem_shape.lower_padding);
auto tstride = cute::reverse(problem_shape.traversal_stride);
auto dilation = cute::reverse(problem_shape.dilation);
cutlass::reference::host::ConvReferenceImpl<
ConvOp,
NumSpatialDimensions,
decltype(mA),
decltype(mB),
decltype(mC),
decltype(mD_ref),
decltype(padding),
decltype(tstride),
decltype(dilation),
decltype(epilogue_fusion_params)>
reference_impl(mA, mB, mC, mD_ref, padding, tstride, dilation, epilogue_fusion_params);
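// Conceptually, the host reference (run below, and possibly skipped when a cached hash is found)
// computes D_ref = activation(alpha * conv(A, B) + beta * C [+ bias]), mirroring the fused
// epilogue configured above; see ConvReferenceImpl for the exact semantics.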
//
// Reference check - support caching results
//
CachedTestKey cached_test_key = CreateCachedConvNd3xTestKey<
ProblemShape,
ElementA,
ElementB,
ElementC,
ElementD
>(
ConvOp,
problem_shape,
alpha,
beta,
tensor_A,
tensor_B,
tensor_C
);
//
// Look for the cached key
//
bool cached_result_loaded = false;
CachedTestResult cached_test_result;
std::string convnd_result_cache_name =
std::string("cached_results_") + CUTLASS_TARGET_NAME + ".txt";
#if (CUTLASS_TEST_ENABLE_CACHED_RESULTS)
CachedTestResultListing cached_results(convnd_result_cache_name);
auto cached = cached_results.find(cached_test_key);
cached_result_loaded = cached.first;
if (cached_result_loaded) {
cached_test_result = cached.second;
}
#endif
if (!cached_result_loaded) {
// Compute reference
reference_impl.compute_reference();
#if (CUTLASS_TEST_ENABLE_CACHED_RESULTS)
cached_test_result.D = TensorHash(tensor_D_reference);
CachedTestResultListing cached_results(convnd_result_cache_name);
cached_results.append(cached_test_key, cached_test_result);
cached_results.write(convnd_result_cache_name);
#endif
} // if (!cached_result_loaded)
#if (CUTLASS_TEST_ENABLE_CACHED_RESULTS)
uint32_t tensor_D_computed_hash = TensorHash(tensor_D_computed);
passed = (tensor_D_computed_hash == cached_test_result.D);
// If hash fails, double check against reference implementation.
if(!passed) {
std::cerr << "Hash-based comparison unsuccessful for key:" << "\n" << cached_test_key
<< ", comparing with reference implementation now.\n";
if (cached_result_loaded) {
// Compute reference
reference_impl.compute_reference();
}
// Validate kernel against reference
passed = compare_reference(mD_ref, mD_computed, mA, mB, mAlpha, mBeta, mBias, this->epsilon);
}
#else
// Validate kernel against reference
passed = compare_reference(mD_ref, mD_computed, mA, mB, mAlpha, mBeta, mBias, this->epsilon);
#endif
EXPECT_TRUE(passed);
return passed;
}
template<
class Engine, class Layout,
class EngineA, class LayoutA,
class EngineB, class LayoutB,
class EngineAlpha, class LayoutAlpha,
class EngineBeta, class LayoutBeta,
class EngineBias, class LayoutBias>
static constexpr bool
compare_reference(
cute::Tensor<Engine, Layout> const& reference,
cute::Tensor<Engine, Layout> const& computed,
cute::Tensor<EngineA, LayoutA> const& A,
cute::Tensor<EngineB, LayoutB> const& B,
cute::Tensor<EngineAlpha, LayoutAlpha> const& tensor_alpha,
cute::Tensor<EngineBeta, LayoutBeta> const& tensor_beta,
cute::Tensor<EngineBias, LayoutBias> const& tensor_bias,
float epsilon = 0.0f) {
if (size(reference) != size(computed)) {
return false;
}
bool passed = true;
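// Pass criterion: with epsilon == 0 every element must compare exactly equal; otherwise an
// element passes when min(absolute error, relative error) <= epsilon, and a NaN in either
// error metric counts as a failure.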
if (epsilon == 0.0f) {
// fast refcheck w/o epsilon
for (size_t i = 0; i < size_t(size(reference)); ++i) {
if (reference(i) != computed(i)) {
passed = false;
break;
}
}
} else {
// refcheck with epsilon
for (size_t i = 0; i < size_t(size(reference)); ++i) {
auto ref = static_cast<float>(reference(i));
auto act = static_cast<float>(computed(i));
auto abs_error = std::abs(act - ref);
auto rel_error = abs_error / (std::max(std::abs(act), std::abs(ref)) + 0.00001f);
if (std::isnan(abs_error) || std::isnan(rel_error) ||
std::min(abs_error, rel_error) > epsilon) {
passed = false;
break;
}
}
}
#if CUTLASS_DEBUG_TRACE_LEVEL > 1
if (not passed) {
cute::print("Reference:");
cute::print_tensor(reference);
cute::print("\nComputed:");
cute::print_tensor(computed);
cute::print("\n");
for (size_t i = 0; i < size_t(size(A)); ++i) {
printf("[%zu]: A = %f\n", i, float(A(i)));
}
for (size_t i = 0; i < size_t(size(B)); ++i) {
printf("[%zu]: B = %f\n", i, float(B(i)));
}
if constexpr (IsBiasEnabled) {
for (size_t i = 0; i < size_t(size(tensor_bias)); ++i) {
printf("[%zu]: bias = %f\n", i, float(tensor_bias(i)));
}
}
for (size_t i = 0; i < size_t(size(reference)); ++i) {
printf("[%zu]: ref = %f, computed = %f\n", i, float(reference(i)), float(computed(i)));
}
}
#endif
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Conv>
bool TestAllConv(double alpha = 1.0, double beta = 0.0, float epsilon = 0.0f) {
using ElementScalar = typename Conv::EpilogueOutputOp::ElementScalar;
bool passed = true;
ConvTestbed<Conv> testbed;
testbed.epsilon = epsilon;
auto problem_vector = get_conv_problem_vector<
Conv::NumSpatialDimensions, Conv::DispatchPolicy::ConvOp>();
for (auto conv_problem : problem_vector) {
#if CUTLASS_DEBUG_TRACE_LEVEL > 0
print(conv_problem);
#endif
passed = testbed.run(
conv_problem,
cutlass::from_real<ElementScalar>(alpha),
cutlass::from_real<ElementScalar>(beta));
if (!passed) {
printf("Failed test for "); print(conv_problem);
return false;
}
}
return passed;
}
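// Illustrative usage from a test translation unit (a sketch only: `MyConvKernel` is a placeholder
// for a fully specialized CUTLASS 3.x convolution kernel, not a type defined in this testbed):
//
//   TEST(SM90_device_conv3d_fprop, example_config) {
//     using Conv = cutlass::conv::device::ConvUniversalAdapter<MyConvKernel>;
//     EXPECT_TRUE(test::conv::device::TestAllConv<Conv>(/*alpha=*/1.0, /*beta=*/0.5));
//   }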
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace test::conv::device
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/conv/device_3x/testbed_conv.hpp/0 | {
"file_path": "test/unit/conv/device_3x/testbed_conv.hpp",
"repo_id": "test",
"token_count": 8056
} | 48 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Unit tests for the small matrix class.
*/
#include <iostream>
#include "../common/cutlass_unit_test.h"
#include "cutlass/matrix.h"
#include "cutlass/core_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
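// Each test below builds small 4x4 matrices, applies the operation under test, and checks every
// element of the result against a scalar reference computed directly with at(i, j).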
TEST(Matrix, elementwise_add) {
using Matrix4x4 = cutlass::Matrix4x4<float>;
Matrix4x4 A = {
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16
};
Matrix4x4 B = A.transpose();
Matrix4x4 C = A.add(B * 2.125f);
bool passed = true;
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
float got = C.at(i, j);
float expected = A.at(i, j) + A.at(j, i) * 2.125f;
if (got != expected) {
passed = false;
}
}
}
EXPECT_TRUE(passed);
if (!passed) {
std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << std::endl;
}
}
TEST(Matrix, elementwise_multiply) {
using Matrix4x4 = cutlass::Matrix4x4<float>;
Matrix4x4 A = {
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16
};
Matrix4x4 B = A.transpose();
Matrix4x4 C = A.multiply(B);
bool passed = true;
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
float got = C.at(i, j);
float expected = A.at(i, j) * A.at(j, i);
if (got != expected) {
passed = false;
}
}
}
EXPECT_TRUE(passed);
if (!passed) {
std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << std::endl;
}
}
TEST(Matrix, product_4x4_overloads) {
using Matrix4x4 = cutlass::Matrix4x4<float>;
Matrix4x4 A = {
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16
};
Matrix4x4 B = {
-1, -2, 0, 4,
1, 2, 1, 1,
3, 2, 1, 1,
1, 0, 8, 2
};
Matrix4x4 C = Matrix4x4::identity();
Matrix4x4 D = A * B + C;
bool passed = true;
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
float got = D.at(i, j);
float expected = (i == j ? 1.0f : 0);
for (int k = 0; k < 4; ++k) {
expected += A.at(i, k) * B.at(k, j);
}
if (got != expected) {
passed = false;
}
}
}
EXPECT_TRUE(passed);
if (!passed) {
std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << "\n\nD:\n" << D << std::endl;
}
}
TEST(Matrix, product_4x4) {
using Matrix4x4 = cutlass::Matrix4x4<float>;
Matrix4x4 A = {
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16
};
Matrix4x4 B = {
-1, -2, 0, 4,
1, 2, 1, 1,
3, 2, 1, 1,
1, 0, 8, 2
};
Matrix4x4 C = Matrix4x4::identity();
// Compute product with optional source accumulator
Matrix4x4 D = A.product(B, C);
bool passed = true;
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
float got = D.at(i, j);
float expected = (i == j ? 1.0f : 0.0f);
for (int k = 0; k < 4; ++k) {
expected += A.at(i, k) * B.at(k, j);
}
if (got != expected) {
passed = false;
}
}
}
EXPECT_TRUE(passed);
if (!passed) {
std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << "\n\nD:\n" << D << std::endl;
}
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
float c = (i == j ? 1.0f : 0.0f);
EXPECT_TRUE(A.row(i).dot(B.column(j)) + c == D.at(i, j));
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/core/matrix.cu/0 | {
"file_path": "test/unit/core/matrix.cu",
"repo_id": "test",
"token_count": 2184
} | 49 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <numeric>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
using namespace cute;
template <class ElementType, class SmemLayout>
struct SharedStorage
{
cute::ArrayEngine<ElementType, cute::cosize_v<SmemLayout>> smem;
};
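// The device kernel below stages a GMEM tile into SMEM with the provided tiled cp.async copy,
// fences and waits on the async group, then has thread 0 write the SMEM tile back out to GMEM
// so the host side can verify the round trip.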
template <class T, class TiledCopy, class GmemLayout, class SmemLayout>
__global__ void
test_tiled_cp_async_device_cute(T const* g_in, T* g_out,
TiledCopy const tiled_copy,
GmemLayout gmem_layout, SmemLayout smem_layout)
{
using namespace cute;
extern __shared__ char shared_memory[];
using SharedStorage = SharedStorage<T, SmemLayout>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
auto thr_copy = tiled_copy.get_slice(threadIdx.x);
Tensor gA = make_tensor(make_gmem_ptr(g_in), gmem_layout);
Tensor gB = make_tensor(make_gmem_ptr(g_out), gmem_layout);
// Construct SMEM tensor
Tensor sA = make_tensor(make_smem_ptr(shared_storage.smem.begin()), smem_layout);
auto tAgA = thr_copy.partition_S(gA);
auto tAsA = thr_copy.partition_D(sA);
#if 0
if (thread0()) {
print("gA : "); print(gA.layout()); print("\n");
print("sA : "); print(sA.layout()); print("\n");
print("tAgA: "); print(tAgA.layout()); print("\n");
print("tAsA: "); print(tAsA.layout()); print("\n");
}
#endif
copy(tiled_copy, tAgA, tAsA);
cp_async_fence();
cp_async_wait<0>();
__syncthreads();
// Store trivially smem -> gmem
if (thread0()) {
copy(sA, gB);
}
}
template <class T, class TiledCopy, class GMEM_Layout, class SMEM_Layout>
void
test_tiled_cp_async(
TiledCopy const tiled_copy,
GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout)
{
using namespace cute;
// Allocate and initialize host test data
size_t N = ceil_div(cosize(gmem_layout) * sizeof_bits<T>::value, 8);
thrust::host_vector<T> h_in(N);
Tensor hA_in = make_tensor(recast_ptr<T>(h_in.data()), gmem_layout);
for (int i = 0; i < size(hA_in); ++i) { hA_in(i) = static_cast<T>(i % 13); }
// Allocate and initialize device test data
thrust::device_vector<T> d_in = h_in;
thrust::device_vector<T> d_out(h_in.size(), T(-1));
// Launch
int smem_size = int(sizeof(SharedStorage<T, decltype(smem_layout)>));
test_tiled_cp_async_device_cute<<<1, 128, smem_size>>>(
reinterpret_cast<T const*>(raw_pointer_cast(d_in.data())),
reinterpret_cast<T*> (raw_pointer_cast(d_out.data())),
tiled_copy,
gmem_layout,
smem_layout);
// Copy results back to host
thrust::host_vector<T> h_out = d_out;
Tensor hA_out = make_tensor(recast_ptr<T>(h_out.data()), gmem_layout);
// Validate the results. Print only the first 3 errors.
int count = 3;
for (int i = 0; i < size(hA_out) && count > 0; ++i) {
EXPECT_EQ(hA_in(i), hA_out(i));
if (hA_in(i) != hA_out(i)) {
--count;
}
}
}
template <typename T, typename M, typename N, typename GMEM_STRIDE_TYPE, typename SMEM_LAYOUT, typename TILED_COPY>
void test_cp_async_no_swizzle() {
using namespace cute;
auto smem_atom = SMEM_LAYOUT{};
auto smem_layout = tile_to_shape(smem_atom, Shape<M, N>{});
auto gmem_layout = make_layout(make_shape(M{}, N{}), GMEM_STRIDE_TYPE{});
test_tiled_cp_async<T>(TILED_COPY{}, gmem_layout, smem_layout);
}
template <typename T, typename M, typename N, typename GMEM_STRIDE_TYPE, typename SWIZZLE_ATOM, typename SMEM_LAYOUT, typename TILED_COPY>
void test_cp_async_with_swizzle() {
using namespace cute;
auto swizzle_atom = SWIZZLE_ATOM{};
auto smem_atom = composition(swizzle_atom, SMEM_LAYOUT{});
auto smem_layout = tile_to_shape(smem_atom, Shape<M, N>{});
auto gmem_layout = make_layout(make_shape(M{}, N{}), GMEM_STRIDE_TYPE{});
test_tiled_cp_async<T>(TILED_COPY{}, gmem_layout, smem_layout);
}
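// Illustrative instantiation (a sketch only; the concrete copy atom, layouts, and extents below
// are assumptions chosen to match the 128-thread launch in test_tiled_cp_async, not definitions
// taken from this header):
//
//   using TiledCopy = decltype(make_tiled_copy(
//       Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, cute::half_t>{},
//       Layout<Shape<_16,_8>, Stride<_8,_1>>{},    // 16x8 = 128 threads
//       Layout<Shape< _1,_8>>{}));                 // 8 half_t (16B) per thread
//   test_cp_async_no_swizzle<cute::half_t, _64, _64,
//                            Stride<_64,_1>,                        // K-major GMEM stride
//                            Layout<Shape<_8,_64>, Stride<_64,_1>>, // SMEM atom
//                            TiledCopy>();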
| test/unit/cute/ampere/tiled_cp_async_testbed.hpp/0 | {
"file_path": "test/unit/cute/ampere/tiled_cp_async_testbed.hpp",
"repo_id": "test",
"token_count": 2168
} | 50 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include "../hopper/tma_store_testbed.hpp"
using namespace cute;
using namespace cutlass::test;
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
template <class T, class TmaType = T, class GMEM_Layout, class SMEM_Layout, class CTA_Tile>
void
test_tma_store(GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout,
CTA_Tile const& cta_tile)
{
using namespace cute;
return test_tma_store<T, TmaType>(SM90_TMA_STORE{}, gmem_layout, smem_layout, cta_tile);
}
template <class T, class TmaType = T, class GMEM_Layout, class SMEM_Layout>
void
test_tma_store(GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout)
{
using namespace cute;
return test_tma_store<T, TmaType>(gmem_layout, smem_layout, product_each(shape(smem_layout)));
}
TEST(SM90_CuTe_Hopper, Tma_Store_1D)
{
Layout smem_layout = Layout<_256, _1>{};
{
Layout gmem_layout = smem_layout;
test_tma_store<int8_t>(gmem_layout, smem_layout);
test_tma_store<half_t>(gmem_layout, smem_layout);
test_tma_store< float>(gmem_layout, smem_layout);
test_tma_store<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(128, GenColMajor{});
test_tma_store<int8_t>(gmem_layout, smem_layout);
test_tma_store<half_t>(gmem_layout, smem_layout);
test_tma_store< float>(gmem_layout, smem_layout);
test_tma_store<double>(gmem_layout, smem_layout);
}
}
TEST(SM90_CuTe_Hopper, Tma_Store_32x32_Col)
{
Layout smem_layout = Layout<Shape<_32,_32>, Stride<_1,_32>>{};
{
Layout gmem_layout = smem_layout;
test_tma_store<int8_t>(gmem_layout, smem_layout);
test_tma_store<half_t>(gmem_layout, smem_layout);
test_tma_store< float>(gmem_layout, smem_layout);
test_tma_store<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), GenColMajor{});
test_tma_store<int8_t>(gmem_layout, smem_layout);
test_tma_store<half_t>(gmem_layout, smem_layout);
test_tma_store< float>(gmem_layout, smem_layout);
test_tma_store<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), make_stride(Int<1>{}, 1024));
test_tma_store<int8_t>(gmem_layout, smem_layout);
test_tma_store<half_t>(gmem_layout, smem_layout);
test_tma_store< float>(gmem_layout, smem_layout);
test_tma_store<double>(gmem_layout, smem_layout);
}
}
TEST(SM90_CuTe_Hopper, Tma_Store_32x32_Row)
{
Layout smem_layout = Layout<Shape<_32,_32>, Stride<_32,_1>>{};
{
Layout gmem_layout = smem_layout;
test_tma_store<int8_t>(gmem_layout, smem_layout);
test_tma_store<half_t>(gmem_layout, smem_layout);
test_tma_store< float>(gmem_layout, smem_layout);
test_tma_store<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), GenRowMajor{});
test_tma_store<int8_t>(gmem_layout, smem_layout);
test_tma_store<half_t>(gmem_layout, smem_layout);
test_tma_store< float>(gmem_layout, smem_layout);
test_tma_store<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), make_stride(1024, Int<1>{}));
test_tma_store<int8_t>(gmem_layout, smem_layout);
test_tma_store<half_t>(gmem_layout, smem_layout);
test_tma_store< float>(gmem_layout, smem_layout);
test_tma_store<double>(gmem_layout, smem_layout);
}
}
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_store_swizzle_atom_mn()
{
auto smem_layout = SWIZZLE_ATOM<T>{};
Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenColMajor{});
return test_tma_store<T>(gmem_layout, smem_layout);
}
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_store_swizzle_atom_k()
{
auto smem_layout = SWIZZLE_ATOM<T>{};
Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenRowMajor{});
return test_tma_store<T>(gmem_layout, smem_layout);
}
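// The two helpers above tile each GMMA swizzle atom against a GMEM extent that is twice the
// atom's size in each mode, using col-major (MN-major) or row-major (K-major) GMEM respectively,
// so the generated TMA descriptor has to traverse multiple swizzled boxes.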
TEST(SM90_CuTe_Hopper, Tma_Store_Swizzle_Atoms)
{
test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_SW128_Atom>();
test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_SW128_Atom>();
test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_SW64_Atom>();
test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_SW64_Atom>();
test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_SW32_Atom>();
test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_SW32_Atom>();
test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_INTER_Atom>();
test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_INTER_Atom>();
test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_SW128_Atom>();
test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_SW128_Atom>();
test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_SW128_Atom>();
test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_SW128_Atom>();
test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_SW64_Atom>();
test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_SW64_Atom>();
test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_SW64_Atom>();
test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_SW64_Atom>();
test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_SW32_Atom>();
test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_SW32_Atom>();
test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_SW32_Atom>();
test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_SW32_Atom>();
test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_INTER_Atom>();
test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_INTER_Atom>();
test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_INTER_Atom>();
test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_INTER_Atom>();
}
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_store_swizzle_tile_mn()
{
auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{});
Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenColMajor{});
return test_tma_store<T>(gmem_layout, smem_layout);
}
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_store_swizzle_tile_k()
{
auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{});
Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenRowMajor{});
return test_tma_store<T>(gmem_layout, smem_layout);
}
TEST(SM90_CuTe_Hopper, Tma_Store_Swizzle_Tiles)
{
// Other T-types use too much smem
test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_SW128_Atom>();
test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_SW128_Atom>();
test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_SW64_Atom>();
test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_SW64_Atom>();
test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_SW32_Atom>();
test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_SW32_Atom>();
test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_INTER_Atom>();
test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_INTER_Atom>();
}
// Tensor by-mode
TEST(SM90_CuTe_Hopper, Tma_Store_Tensor)
{
// 3-mode TMA
{
Layout gmem_layout = make_layout(make_shape(128, 64, 5));
auto cta_tile = Shape<_64, _32>{}; // GMEM Tiling:
// Take 64-elem from m
// Take 32-elem from k
auto smem_layout = make_layout(Shape<_64,_32>{});
test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile);
}
// 4-mode TMA
{
Layout gmem_layout = make_layout(make_shape(make_shape(80,40),make_shape(32,12)));
auto cta_tile = Shape<Shape<_16,_8>,Shape<_32,_2>>{}; // GMEM Tiling:
// Take 16-elem from m0, 8-elem from m1,
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_128,_64>{});
test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile);
}
// 5-mode TMA
{
Layout gmem_layout = make_layout(make_shape(make_shape(32,32,32),make_shape(32,12)));
auto cta_tile = Shape<Shape<_16,_4,_2>,Shape<_16,_2>>{}; // GMEM Tiling:
// Take 16-elem from m0, 4-elem from m1, 2-elem from m2
// Take 16-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_128,_32>{});
test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile);
}
}
// Tensor Multimode -- TMA with more than 5 modes in GMEM (packs residual modes into last TMA mode)
TEST(SM90_CuTe_Hopper, Tma_Store_Tensor_Multimode)
{
{
Layout gmem_layout = make_layout(make_shape(make_shape(32,3,2,2),make_shape(32,4,2)));
auto cta_tile = Shape<Shape<_32>, Shape<_32,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_32,_64>{});
test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile);
}
{
Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,2),make_shape(32,4,2)));
auto cta_tile = Shape<Shape<_32,_3>, Shape<_32,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0, 3-elem from m1
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_96,_64>{});
test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile);
}
{
Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,3,2),make_shape(32,4,2,2)));
auto cta_tile = Shape<Shape<_32>, Shape<_16,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0
// Take 16-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_32,_32>{});
test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile);
}
}
#endif
| test/unit/cute/hopper/tma_store.cu/0 | {
"file_path": "test/unit/cute/hopper/tma_store.cu",
"repo_id": "test",
"token_count": 5975
} | 51 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/atom/mma_atom.hpp"
#include "cute/atom/copy_atom.hpp"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/mma.h"
#include "cutlass/layout/layout.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/collective/collective_mma.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
namespace cutlass {
namespace gemm {
namespace device {
using namespace cute;
// This type is only intended to demonstrate porting 2.x kernels to 3.0
template<
class OperatorClass, class ArchTag,
class ElementA, class LayoutA,
class ElementB, class LayoutB,
class ElementC, class LayoutC,
class ElementAccumulator>
struct DefaultGemmConfigurationToCutlass3Types {
static_assert(sizeof(ElementA) == 0, "No valid DefaultGemmConfigurationToCutlass3Types configuration exists.");
};
///////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element, typename Layout, int Alignment, int SizeK>
struct DefaultGemm_TensorOpSm80_OperandA;
template <typename Element, typename Layout, int Alignment, int SizeK>
struct DefaultGemm_TensorOpSm80_OperandB;
//
// F16: 128-by-128-by-64
//
/// Operand A - Row-major (K-Major)
template <>
struct DefaultGemm_TensorOpSm80_OperandA<half_t, layout::RowMajor, 8, 64>
{
// Smem
using SmemLayoutAtom = decltype(
composition(Swizzle<3,3,3>{},
Layout<Shape < _8,_64>,
Stride<_64, _1>>{}));
using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, half_t>;
// Gmem
using GmemTiledCopy = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, half_t>{},
Layout<Shape <_16,_8>,
Stride< _8,_1>>{},
Layout<Shape < _1,_8>>{}));
};
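// Note: Swizzle<3,3,3> over this 8x64 half_t atom (128 bytes per row) is the standard 128-byte
// shared-memory swizzle: it permutes 16-byte vectors within each row group so the SM75 LDSM loads
// issued by SmemCopyAtom avoid shared-memory bank conflicts.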
/// Operand A - Column-major (M-major)
template <int SizeK>
struct DefaultGemm_TensorOpSm80_OperandA<half_t, layout::ColumnMajor, 8, SizeK>
{
// Smem
using SmemLayoutAtom = decltype(
composition(Swizzle<3,3,3>{},
Layout<Shape <_64, _8>,
Stride< _1,_64>>{}));
using SmemCopyAtom = Copy_Atom<SM75_U16x8_LDSM_T, half_t>;
// Gmem
using GmemTiledCopy = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, half_t>{},
Layout<Shape <_16, _8>,
Stride< _1,_16>>{},
Layout<Shape < _8, _1>>{}));
};
// Because the F32F16 TiledMMA is A-B symmetric, we can reuse the DefaultOperands
// Operand B - Column-Major (K-major)
template <int Alignment, int SizeK>
struct DefaultGemm_TensorOpSm80_OperandB<half_t, layout::ColumnMajor, Alignment, SizeK>
: DefaultGemm_TensorOpSm80_OperandA<half_t, layout::RowMajor, Alignment, SizeK>
{};
// Operand B - Row-Major (N-major)
template <int Alignment, int SizeK>
struct DefaultGemm_TensorOpSm80_OperandB<half_t, layout::RowMajor, Alignment, SizeK>
: DefaultGemm_TensorOpSm80_OperandA<half_t, layout::ColumnMajor, Alignment, SizeK>
{};
//
// F16: 128-by-128-by-32 (small k-block)
//
/// Operand A - Row-major (K-Major)
template <>
struct DefaultGemm_TensorOpSm80_OperandA<half_t, layout::RowMajor, 8, 32>
{
// Smem
using SmemLayoutAtom = decltype(
composition(Swizzle<2,3,3>{},
Layout<Shape < _8,_32>,
Stride<_32, _1>>{}));
using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, half_t>;
// Gmem
using GmemTiledCopy = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, half_t>{},
Layout<Shape <_32,_4>,
Stride< _4,_1>>{},
Layout<Shape < _1,_8>>{}));
};
}
///////////////////////////////////////////////////////////////////////////////
// Ampere MMA F32F16
template <typename LayoutA, typename LayoutB, typename LayoutC>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassTensorOp, arch::Sm80,
half_t, LayoutA,
half_t, LayoutB,
float, LayoutC,
float>
{
using TileShape = Shape<_128, _128, _32>;
static constexpr int ThreadCount = 128;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<SM80_16x8x16_F32F16F16F32_TN>,
Layout<Shape<_2,_2,_1>>, // 2x2x1 thread group
Tile<_32,_32,_16>>; // 32x32x16 MMA for LDSM, 1x2x1 value group
// A
static constexpr int kAlignmentA = 8;
using DefaultOperandA = detail::DefaultGemm_TensorOpSm80_OperandA<
half_t, LayoutA, kAlignmentA, 32>;
using SmemLayoutAtomA = typename DefaultOperandA::SmemLayoutAtom; // M, K
using SmemCopyAtomA = typename DefaultOperandA::SmemCopyAtom;
using GmemTiledCopyA = typename DefaultOperandA::GmemTiledCopy;
// B
static constexpr int kAlignmentB = 8;
using DefaultOperandB = detail::DefaultGemm_TensorOpSm80_OperandB<
half_t, LayoutB, kAlignmentB, 32>;
using SmemLayoutAtomB = typename DefaultOperandB::SmemLayoutAtom; // N, K
using SmemCopyAtomB = typename DefaultOperandB::SmemCopyAtom;
using GmemTiledCopyB = typename DefaultOperandB::GmemTiledCopy;
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
half_t, TagToStrideA_t<LayoutA>,
half_t, TagToStrideB_t<LayoutB>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<float, 1, float, float>,
cutlass::gemm::EpilogueDefault>;
};
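// How a configuration like the one above is typically composed into a complete kernel (a sketch
// of the usual CUTLASS 3.x pattern; the exact wiring used by the unit tests is not shown in this
// header):
//
//   using Config = DefaultGemmConfigurationToCutlass3Types<
//       arch::OpClassTensorOp, arch::Sm80,
//       half_t, cutlass::layout::RowMajor,
//       half_t, cutlass::layout::ColumnMajor,
//       float,  cutlass::layout::ColumnMajor,
//       float>;
//   using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
//       Shape<int,int,int,int>,                 // problem shape (M, N, K, L)
//       typename Config::CollectiveMainloop,
//       typename Config::CollectiveEpilogue>;
//   using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;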
///////////////////////////////////////////////////////////////////////////////
namespace detail {
//
// TF32: 128-by-128-by-kblock (kBlock = 16, 32)
//
/// Operand A - Row-major (K-major) (kBlock = 32)
template <>
struct DefaultGemm_TensorOpSm80_OperandA<tfloat32_t, layout::RowMajor, 4, 32>
{
// Smem
using SmemLayoutAtom = decltype(
composition(Swizzle<3,2,3>{},
Layout<Shape < _8,_32>,
Stride<_32, _1>>{}));
using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, tfloat32_t>;
// Gmem
using GmemTiledCopy = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, tfloat32_t>{},
Layout<Shape <_16,_8>,
Stride< _8,_1>>{},
Layout<Shape < _1,_4>>{}));
};
/// Operand A - Row-major (K-major) (kBlock = 16)
template <>
struct DefaultGemm_TensorOpSm80_OperandA<tfloat32_t, layout::RowMajor, 4, 16>
{
// Smem
using SmemLayoutAtom = decltype(
composition(Swizzle<2,2,3>{},
Layout<Shape < _8,_16>,
Stride<_16, _1>>{}));
using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, tfloat32_t>;
// Gmem
using GmemTiledCopy = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, tfloat32_t>{},
Layout<Shape <_32,_4>,
Stride< _4,_1>>{},
Layout<Shape < _1,_4>>{}));
};
/// Operand A - Column-major (M-major)
template <int SizeK>
struct DefaultGemm_TensorOpSm80_OperandA<tfloat32_t, layout::ColumnMajor, 4, SizeK>
{
// Smem
using SmemLayoutAtom = decltype(
composition(Swizzle<3,2,3>{},
Layout<Shape <_32, _8>,
Stride< _1,_32>>{}));
using SmemCopyAtom = Copy_Atom<UniversalCopy<tfloat32_t>, tfloat32_t>;
// Gmem
using GmemTiledCopy = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, tfloat32_t>{},
Layout<Shape <_16, _8>,
Stride< _1,_16>>{},
Layout<Shape < _4, _1>>{}));
};
// Because the TF32 TiledMMA is A-B symmetric, we can reuse the DefaultOperands
// Operand B - Column-Major (K-major)
template <int Alignment, int SizeK>
struct DefaultGemm_TensorOpSm80_OperandB<tfloat32_t, layout::ColumnMajor, Alignment, SizeK>
: DefaultGemm_TensorOpSm80_OperandA<tfloat32_t, layout::RowMajor, Alignment, SizeK>
{};
// Operand B - Row-Major (N-major)
template <int Alignment, int SizeK>
struct DefaultGemm_TensorOpSm80_OperandB<tfloat32_t, layout::RowMajor, Alignment, SizeK>
: DefaultGemm_TensorOpSm80_OperandA<tfloat32_t, layout::ColumnMajor, Alignment, SizeK>
{};
}
///////////////////////////////////////////////////////////////////////////////
// Ampere MMA F32TF32
template <typename LayoutA, typename LayoutB, typename LayoutC>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassTensorOp, arch::Sm80,
tfloat32_t, LayoutA,
tfloat32_t, LayoutB,
float, LayoutC,
float>
{
using TileShape = Shape<_128, _128, _32>;
static constexpr int ThreadCount = 128;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<SM80_16x8x8_F32TF32TF32F32_TN>,
Layout<Shape<_2,_2,_1>, Stride<_2, _1, _1>>, // 2x2x1 thread group
Tile<_32,_32,_8>>; // 32x32x8 MMA for LDSM, 1x2x1 value group
// A
static constexpr int kAlignmentA = 4;
using DefaultOperandA = detail::DefaultGemm_TensorOpSm80_OperandA<
tfloat32_t, LayoutA, kAlignmentA, 32>;
using SmemLayoutAtomA = typename DefaultOperandA::SmemLayoutAtom; // M, K
using SmemCopyAtomA = typename DefaultOperandA::SmemCopyAtom;
using GmemTiledCopyA = typename DefaultOperandA::GmemTiledCopy;
// B
static constexpr int kAlignmentB = 4;
using DefaultOperandB = detail::DefaultGemm_TensorOpSm80_OperandB<
tfloat32_t, LayoutB, kAlignmentB, 32>;
using SmemLayoutAtomB = typename DefaultOperandB::SmemLayoutAtom; // N, K
using SmemCopyAtomB = typename DefaultOperandB::SmemCopyAtom;
using GmemTiledCopyB = typename DefaultOperandB::GmemTiledCopy;
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
tfloat32_t, TagToStrideA_t<LayoutA>,
tfloat32_t, TagToStrideB_t<LayoutB>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<float, 1, float, float>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
template <typename LayoutC>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassTensorOp, arch::Sm80,
int8_t, cutlass::layout::RowMajor,
int8_t, cutlass::layout::ColumnMajor,
int32_t, LayoutC,
int32_t>
{
using TileShape = Shape<_128, _128, _64>;
static constexpr int ThreadCount = 128;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<SM80_16x8x32_S32S8S8S32_TN>,
Layout<Shape<_2,_2,_1>>, // 2x2x1 thread group
Tile<_32,_32,_32>>; // 32x32x32 MMA for LDSM, 1x2x1 value group
// A (M,K) K-major
using SmemLayoutAtomA = decltype(
composition(
Swizzle<2,4,3>{},
Layout<Shape <_16,_64>,
Stride<_64, _1>>{}));
static constexpr int kAlignmentA = 16;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, int8_t>{},
Layout<Shape <_32,_4>,
Stride< _4,_1>>{},
Layout<Shape<_1,Int<kAlignmentA>>>{}));
// LDS.32- or LDSM-based copy atom
// using SmemCopyAtomA = Copy_Atom<DefaultCopy, uint8_t>;
using SmemCopyAtomA = Copy_Atom<SM75_U32x4_LDSM_N, uint8_t>; // LDSM works
// B (N,K) K-major
using SmemLayoutAtomB = decltype(
composition(
Swizzle<2,4,3>{},
Layout<Shape <_16,_64>,
Stride<_64, _1>>{}));
static constexpr int kAlignmentB = 16;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, int8_t>{},
Layout<Shape <_32,_4>,
Stride< _4,_1>>{},
Layout<Shape<_1,Int<kAlignmentB>>>{}));
// LDS.32- or LDSM-based copy atom
// using SmemCopyAtomB = Copy_Atom<DefaultCopy, uint32_t>;
using SmemCopyAtomB = Copy_Atom<SM75_U32x4_LDSM_N, uint8_t>; // LDSM works
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
int8_t, TagToStrideA_t<cutlass::layout::RowMajor>,
int8_t, TagToStrideB_t<cutlass::layout::ColumnMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<int32_t, 1, int32_t, int32_t>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
//////////////////////////// SIMT TWO STAGE ///////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element, typename Layout, int ThreadCount, int ShapeM, int ShapeK>
struct DefaultGemm_Simt_OperandA;
///////////////////////////////////////////////////////////////////////////////
template <typename Element>
struct DefaultGemm_Simt_OperandA<Element, layout::ColumnMajor, 256, 128, 8>
{
using SmemLayoutAtom = Layout<Shape <_128, _8>,
Stride< _1,_128>>;
using SmemCopyAtom = Copy_Atom<DefaultCopy, Element>;
using GmemTiledCopy = decltype(
make_tiled_copy(Copy_Atom<UniversalCopy<Element>, Element>{},
Layout<Shape <_32, _8>,
Stride< _1,_32>>{},
Layout<Shape<_1,_1>>{}));
};
template <typename Element>
struct DefaultGemm_Simt_OperandA<Element, layout::RowMajor, 256, 128, 8>
{
using SmemLayoutAtom = Layout<Shape <_128, _8>,
Stride< _1,Int<128 + 4>>>; // Padded
using SmemCopyAtom = Copy_Atom<DefaultCopy, Element>;
using GmemTiledCopy = decltype(
make_tiled_copy(Copy_Atom<UniversalCopy<Element>, Element>{},
Layout<Shape <_32, _8>,
Stride< _8, _1>>{},
Layout<Shape<_1,_1>>{}));
};
template <typename Element, typename Layout, int ThreadCount, int ShapeN, int ShapeK>
struct DefaultGemm_Simt_OperandB;
template <typename Element, int ThreadCount, int ShapeN, int ShapeK>
struct DefaultGemm_Simt_OperandB<Element, layout::ColumnMajor, ThreadCount, ShapeN, ShapeK>
: DefaultGemm_Simt_OperandA<Element, layout::RowMajor, ThreadCount, ShapeN, ShapeK> {};
template <typename Element, int ThreadCount, int ShapeN, int ShapeK>
struct DefaultGemm_Simt_OperandB<Element, layout::RowMajor, ThreadCount, ShapeN, ShapeK>
: DefaultGemm_Simt_OperandA<Element, layout::ColumnMajor, ThreadCount, ShapeN, ShapeK> {};
} // end namespace detail
// SIMT Two Stage
template <
class ArchTag,
class ElementA, class LayoutA,
class ElementB, class LayoutB,
class ElementC, class LayoutC,
class ElementAccumulator>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassSimt, ArchTag,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator>
{
using TileShape = Shape<_128, _128, _8>;
static constexpr int ThreadCount = 256;
using DispatchPolicy = MainloopSm70TwoStage;
using TiledMma = TiledMMA<
MMA_Atom<UniversalFMA<ElementAccumulator, ElementA, ElementB, ElementC>>,
Layout<Shape<_16, _16, _1>>>;
// A
static constexpr int kAlignmentA = 1;
using DefaultOperandA = detail::DefaultGemm_Simt_OperandA<ElementA, LayoutA, ThreadCount, 128, 8>;
using SmemLayoutAtomA = typename DefaultOperandA::SmemLayoutAtom;
using SmemCopyAtomA = typename DefaultOperandA::SmemCopyAtom;
using GmemTiledCopyA = typename DefaultOperandA::GmemTiledCopy;
// B
static constexpr int kAlignmentB = 1;
using DefaultOperandB = detail::DefaultGemm_Simt_OperandB<ElementB, LayoutB, ThreadCount, 128, 8>;
using SmemLayoutAtomB = typename DefaultOperandB::SmemLayoutAtom;
using SmemCopyAtomB = typename DefaultOperandB::SmemCopyAtom;
using GmemTiledCopyB = typename DefaultOperandB::GmemTiledCopy;
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
ElementA, TagToStrideA_t<LayoutA>,
ElementB, TagToStrideB_t<LayoutB>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>,
cutlass::gemm::EpilogueDefault>;
};
//
// DP4A - int8 Proof-of-concept
//
// SIMT Two Stage TN - idp4a
template <
class ArchTag,
class ElementC, class LayoutC>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassSimt, ArchTag,
int8_t, cutlass::layout::RowMajor,
int8_t, cutlass::layout::ColumnMajor,
ElementC, LayoutC,
int32_t>
{
using TileShape = Shape<_128, _128, _32>;
static constexpr int ThreadCount = 256;
using DispatchPolicy = MainloopSm70TwoStage;
// NOTE: permuting MMA M mode lets us generate 128b smem loads (LDS.128) but has worst case bank conflicts
using TiledMma = TiledMMA<
MMA_Atom<SM61_DP4A>,
Layout<Shape<_16,_16,_1>>>; // Tile of atoms (threads)
// A (M,K) K-major
using ElementA = int8_t;
// ~40% perf from the regular M- and N-major layout:
// using SmemLayoutAtomA = Layout<Shape <_128,_32>,
// Stride< _1,_128>>;
// ~80% perf from the interleaved layout used below:
using SmemLayoutAtomA = Layout<Shape <_128, Shape <_4, _8>>,
Stride< _4, Stride<_1,_512>>>;
using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>;
static constexpr int kAlignmentA = 4;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint32_t>, ElementA>{},
Layout<Shape <_32,_8>,
Stride< _8,_1>>{},
Layout<Shape < _1,_4>>{}));
// B (N,K) K-major
using ElementB = int8_t;
// ~40% perf from the regular M- and N-major layout:
// using SmemLayoutAtomB = Layout<Shape <_128,_32>,
// Stride< _1,_128>>;
// ~80% perf from the interleaved layout used below:
using SmemLayoutAtomB = Layout<Shape <_128, Shape <_4, _8>>,
Stride< _4, Stride<_1,_512>>>;
using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>;
static constexpr int kAlignmentB = 4;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint32_t>, ElementB>{},
Layout<Shape <_32,_8>,
Stride< _8,_1>>{},
Layout<Shape < _1,_4>>{}));
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
ElementA, TagToStrideA_t<cutlass::layout::RowMajor>,
ElementB, TagToStrideB_t<cutlass::layout::ColumnMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<ElementC, 1, int32_t, int32_t>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
// SIMT Two Stage NN - idp4a
template <
class ArchTag,
class ElementC, class LayoutC>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassSimt, ArchTag,
int8_t, cutlass::layout::ColumnMajor,
int8_t, cutlass::layout::ColumnMajor,
ElementC, LayoutC,
int32_t>
{
using TileShape = Shape<_128, _128, _32>;
static constexpr int ThreadCount = 256;
using DispatchPolicy = MainloopSm70TwoStage;
using TiledMma = TiledMMA<
MMA_Atom<SM61_DP4A>,
Layout<Shape<_16, _16, _1>>>;
// A (M,K) M-major
using ElementA = int8_t;
using SmemLayoutAtomA = Layout<Shape <_128, Shape <_4, _8>>,
Stride< _4, Stride<_1,_512>>>;
using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>;
static constexpr int kAlignmentA = 1;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint8_t>, ElementA>{},
Layout<Shape <_32, _8>,
Stride< _1,_32>>{},
Layout<Shape < _1, _1>>{}));
// B (N,K) K-major
using ElementB = int8_t;
using SmemLayoutAtomB = Layout<Shape <_128, Shape <_4, _8>>,
Stride< _4, Stride<_1,_512>>>;
using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>;
static constexpr int kAlignmentB = 4;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint32_t>, ElementB>{},
Layout<Shape <_32,_8>,
Stride< _8,_1>>{},
Layout<Shape < _1,_4>>{}));
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
ElementA, TagToStrideA_t<cutlass::layout::ColumnMajor>,
ElementB, TagToStrideB_t<cutlass::layout::ColumnMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<ElementC, 1, int32_t, int32_t>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
// SIMT Two Stage NT - idp4a
template <
class ArchTag,
class ElementC, class LayoutC>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassSimt, ArchTag,
int8_t, cutlass::layout::ColumnMajor,
int8_t, cutlass::layout::RowMajor,
ElementC, LayoutC,
int32_t>
{
using TileShape = Shape<_128, _128, _32>;
static constexpr int ThreadCount = 256;
using DispatchPolicy = MainloopSm70TwoStage;
using TiledMma = TiledMMA<
MMA_Atom<SM61_DP4A>,
Layout<Shape<_16, _16, _1>>>;
// A (M,K) M-major
using ElementA = int8_t;
using SmemLayoutAtomA = Layout<Shape <_128, Shape <_4, _8>>,
Stride< _4, Stride<_1,_512>>>;
using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>;
static constexpr int kAlignmentA = 1;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint8_t>, ElementA>{},
Layout<Shape <_32, _8>,
Stride< _1,_32>>{},
Layout<Shape < _1, _1>>{}));
// B (N,K) N-major
using ElementB = int8_t;
using SmemLayoutAtomB = Layout<Shape <_128, Shape <_4, _8>>,
Stride< _4, Stride<_1,_512>>>;
using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>;
static constexpr int kAlignmentB = 1;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint8_t>, ElementB>{},
Layout<Shape <_32, _8>,
Stride< _1,_32>>{},
Layout<Shape < _1, _1>>{}));
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
ElementA, TagToStrideA_t<cutlass::layout::ColumnMajor>,
ElementB, TagToStrideB_t<cutlass::layout::RowMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<ElementC, 1, int32_t, int32_t>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
// SIMT Two Stage TT - idp4a
template <
class ArchTag,
class ElementC, class LayoutC>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassSimt, ArchTag,
int8_t, cutlass::layout::RowMajor,
int8_t, cutlass::layout::RowMajor,
ElementC, LayoutC,
int32_t>
{
using TileShape = Shape<_128, _128, _32>;
static constexpr int ThreadCount = 256;
using DispatchPolicy = MainloopSm70TwoStage;
using TiledMma = TiledMMA<
MMA_Atom<SM61_DP4A>,
Layout<Shape<_16, _16, _1>>>;
// A (M,K) K-major
using ElementA = int8_t;
using SmemLayoutAtomA = Layout<Shape <_128, Shape <_4, _8>>,
Stride< _4, Stride<_1,_512>>>;
using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>;
static constexpr int kAlignmentA = 4;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint32_t>, ElementA>{},
Layout<Shape <_32,_8>,
Stride< _8,_1>>{},
Layout<Shape < _1,_4>>{}));
// B (N,K) N-major
using ElementB = int8_t;
using SmemLayoutAtomB = Layout<Shape <_128, Shape <_4, _8>>,
Stride< _4, Stride<_1,_512>>>;
using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>;
static constexpr int kAlignmentB = 1;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint8_t>, ElementB>{},
Layout<Shape <_32, _8>,
Stride< _1,_32>>{},
Layout<Shape < _1, _1>>{}));
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
ElementA, TagToStrideA_t<cutlass::layout::RowMajor>,
ElementB, TagToStrideB_t<cutlass::layout::RowMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<ElementC, 1, int32_t, int32_t>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
/////////////////////////// SIMT MULTI STAGE //////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// SIMT Multi Stage NT
template <
class ElementA,
class ElementB,
class ElementC, class LayoutC,
class ElementAccumulator>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassSimt, arch::Sm80,
ElementA, cutlass::layout::ColumnMajor,
ElementB, cutlass::layout::RowMajor,
ElementC, LayoutC,
ElementAccumulator>
{
using TileShape = Shape<_128, _128, _16>;
static constexpr int ThreadCount = 256;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<UniversalFMA<ElementAccumulator, ElementA, ElementB, ElementC>>,
Layout<Shape<_16, _16, _1>>, // 16x16x1 thread group
Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>, // 32x32x1 MMA with perm for load vectorization
Layout<Shape<_16,_2>,Stride<_2,_1>>,Underscore>>;
// A (M,K) M-major
using SmemLayoutAtomA = Layout<Shape<_128,_16>>;
using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>;
static constexpr int kAlignmentA = 2;
using AlignmentTypeA = cute::uint_byte_t<static_cast<int>(sizeof(ElementA)) * kAlignmentA>;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentTypeA>, ElementA>{},
Layout<Shape<_32,_8>>{},
Layout<Shape< _2,_1>>{}));
// B (N,K) N-major
using SmemLayoutAtomB = Layout<Shape<_128,_16>>;
using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>;
static constexpr int kAlignmentB = 2;
using AlignmentTypeB = cute::uint_byte_t<static_cast<int>(sizeof(ElementB)) * kAlignmentB>;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentTypeB>, ElementB>{},
Layout<Shape<_32,_8>>{},
Layout<Shape< _2,_1>>{}));
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
ElementA, TagToStrideA_t<cutlass::layout::ColumnMajor>,
ElementB, TagToStrideB_t<cutlass::layout::RowMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
// SIMT Multi Stage TN
template <
class ElementA,
class ElementB,
class ElementC, class LayoutC,
class ElementAccumulator>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassSimt, arch::Sm80,
ElementA, cutlass::layout::RowMajor,
ElementB, cutlass::layout::ColumnMajor,
ElementC, LayoutC,
ElementAccumulator>
{
using TileShape = Shape<_128, _128, _16>;
static constexpr int ThreadCount = 256;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<UniversalFMA<ElementAccumulator, ElementA, ElementB, ElementC>>,
Layout<Shape<_16, _16, _1>>>;
// A (M,K) K-major
using SmemLayoutAtomA = Layout<Shape <_128, _16>,
Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentA
using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>;
static constexpr int kAlignmentA = 1;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<ElementA>, ElementA>{},
Layout<Shape <_16,_16>,
Stride<_16, _1>>{}));
// B (N,K) K-major
using SmemLayoutAtomB = Layout<Shape <_128, _16>,
Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentB
using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>;
static constexpr int kAlignmentB = 1;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<ElementB>, ElementB>{},
Layout<Shape <_16,_16>,
Stride<_16, _1>>{}));
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
ElementA, TagToStrideA_t<cutlass::layout::RowMajor>,
ElementB, TagToStrideB_t<cutlass::layout::ColumnMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
// SIMT Multi Stage NN
template <
class ElementA,
class ElementB,
class ElementC, class LayoutC,
class ElementAccumulator>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassSimt, arch::Sm80,
ElementA, cutlass::layout::ColumnMajor,
ElementB, cutlass::layout::ColumnMajor,
ElementC, LayoutC,
ElementAccumulator>
{
using TileShape = Shape<_128, _128, _16>;
static constexpr int ThreadCount = 256;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<UniversalFMA<ElementAccumulator, ElementA, ElementB, ElementC>>,
Layout<Shape<_16, _16, _1>>, // 16x16x1 thread group
Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>,Underscore,Underscore>>; // 32x16x1 MMA with perm for load vectorization
// A (M,K) M-major
using SmemLayoutAtomA = Layout<Shape<_128,_16>>;
using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>;
static constexpr int kAlignmentA = 2;
using AlignmentTypeA = cute::uint_byte_t<static_cast<int>(sizeof(ElementA)) * kAlignmentA>;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentTypeA>, ElementA>{},
Layout<Shape<_32,_8>>{},
Layout<Shape< _2,_1>>{}));
// B (N,K) K-major
using SmemLayoutAtomB = Layout<Shape <_128, _16>,
Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentB
using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>;
static constexpr int kAlignmentB = 1;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<ElementB>, ElementB>{},
Layout<Shape <_16,_16>,
Stride<_16, _1>>{}));
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
ElementA, TagToStrideA_t<cutlass::layout::ColumnMajor>,
ElementB, TagToStrideB_t<cutlass::layout::ColumnMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
// SIMT Multi Stage TT
template <
class ElementA,
class ElementB,
class ElementC, class LayoutC,
class ElementAccumulator>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassSimt, arch::Sm80,
ElementA, cutlass::layout::RowMajor,
ElementB, cutlass::layout::RowMajor,
ElementC, LayoutC,
ElementAccumulator>
{
using TileShape = Shape<_128, _128, _16>;
static constexpr int ThreadCount = 256;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<UniversalFMA<ElementAccumulator, ElementA, ElementB, ElementC>>,
Layout<Shape<_16, _16, _1>>, // 16x16x1 thread group
Tile<Underscore,Layout<Shape<_16,_2>,Stride<_2,_1>>,Underscore>>; // 16x32x1 MMA with perm for load vectorization
// A (M,K) K-major
using SmemLayoutAtomA = Layout<Shape <_128, _16>,
Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentA
using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>;
static constexpr int kAlignmentA = 1;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<ElementA>, ElementA>{},
Layout<Shape <_16,_16>,
Stride<_16, _1>>{}));
// B (N,K) N-major
using SmemLayoutAtomB = Layout<Shape <_128,_16>>;
using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>;
static constexpr int kAlignmentB = 2;
using AlignmentTypeB = cute::uint_byte_t<static_cast<int>(sizeof(ElementB)) * kAlignmentB>;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentTypeB>, ElementB>{},
Layout<Shape<_32,_8>>{},
Layout<Shape< _2,_1>>{}));
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
ElementA, TagToStrideA_t<cutlass::layout::RowMajor>,
ElementB, TagToStrideB_t<cutlass::layout::RowMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<LayoutC>,
TagToStrideC_t<LayoutC>,
epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
// Ampere fp64 MMA TN (K-Major A and K-Major B)
template <>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassTensorOp, arch::Sm80,
double, cutlass::layout::RowMajor,
double, cutlass::layout::ColumnMajor,
double, cutlass::layout::ColumnMajor,
double>
{
using TileShape = Shape<_128, _64, _16>;
static constexpr int ThreadCount = 128;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<SM80_8x8x4_F64F64F64F64_TN>, // Atom
Layout<Shape<_2,_2,_1>>, // Atom layout
Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>, // 32x32x4 MMA with perm for load vectorization
Layout<Shape<_16,_2>,Stride<_2,_1>>,
Underscore>>;
// A (M,K) K-Major
using SmemLayoutAtomA = decltype(
composition(Swizzle<2,0,4>{},
Layout<Shape <_4,_16>,
Stride<_1, _4>>{})); // M, K
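  // Informal note: Swizzle<2,0,4> XORs two offset bits so that the doubles of this K-major
  // 4x16 atom spread across shared-memory banks; it plays the same role as the "+1"
  // padding used in the SIMT configurations, without spending extra shared memory.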
using SmemCopyAtomA = Copy_Atom<DefaultCopy, double>;
static constexpr int kAlignmentA = 1;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<double>, double>{}, // CopyAtom
Layout<Shape < _8,_16>,
Stride<_16, _1>>{}, // ThrLayout for CopyAtom
Layout<Shape<_1,_1>>{})); // Value layout: 1x1 doubles
// B (N,K) K-Major
using SmemLayoutAtomB = decltype(
composition(Swizzle<2,0,4>{},
Layout<Shape <_4,_16>,
Stride<_1, _4>>{})); // N, K
using SmemCopyAtomB = Copy_Atom<DefaultCopy, double>;
static constexpr int kAlignmentB = 1;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<double>, double>{}, // CopyAtom
Layout<Shape < _8,_16>,
Stride<_16, _1>>{}, // ThrLayout for CopyAtom
Layout<Shape<_1,_1>>{})); // Value layout: 1x1 doubles
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
double, TagToStrideA_t<cutlass::layout::RowMajor>,
double, TagToStrideB_t<cutlass::layout::ColumnMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<cutlass::layout::ColumnMajor>,
TagToStrideC_t<cutlass::layout::ColumnMajor>,
epilogue::thread::LinearCombination<double, 1, double, double>,
cutlass::gemm::EpilogueDefault>;
/*
using EpilogueOutputOp = epilogue::collective::Epilogue<
epilogue::thread::LinearCombination<double, 1, double, double>,
Layout<Shape <_64,_32>,
Stride< _1,_64>>, // SMEM layout
Copy_Atom<UniversalCopy<double>,double>, // R2S with tiled_mma layout
decltype(make_tiled_copy(Copy_Atom<UniversalCopy<double>,double>{},// S2R
Layout<Shape <_16,_16>,
Stride< _1,_16>>{}, // Thread layout
Layout<Shape<_2,_1>>{})), // Value layout
Copy_Atom<UniversalCopy<double>,double> // R2G with S2R_dst layout
>;
*/
};
///////////////////////////////////////////////////////////////////////////////
// Ampere fp64 MMA NN (M-Major A and K-Major B)
template <>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassTensorOp, arch::Sm80,
double, cutlass::layout::ColumnMajor,
double, cutlass::layout::ColumnMajor,
double, cutlass::layout::ColumnMajor,
double>
{
using TileShape = Shape<_128, _64, _16>;
static constexpr int ThreadCount = 128;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<SM80_8x8x4_F64F64F64F64_TN>, // Atom
Layout<Shape<_2,_2,_1>>, // Atom layout
Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>, // 32x32x4 MMA with perm for load vectorization
Layout<Shape<_16,_2>,Stride<_2,_1>>,
Underscore>>;
// A (M,K) M-Major
using SmemLayoutAtomA = decltype(
composition(Swizzle<2,2,2>{},
Layout<Shape <_16, _4>,
Stride< _1,_16>>{})); // M, K
using SmemCopyAtomA = Copy_Atom<DefaultCopy, double>;
static constexpr int kAlignmentA = 2;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{}, // CopyAtom
Layout<Shape <_16, _8>,
Stride< _1,_16>>{}, // ThrLayout for CopyAtom
Layout<Shape<_2,_1>>{})); // Value layout: 2x1 doubles
// B (N,K) K-Major
using SmemLayoutAtomB = decltype(
composition(Swizzle<2,0,4>{},
Layout<Shape <_4,_16>,
Stride<_1, _4>>{}));// N, K
using SmemCopyAtomB = Copy_Atom<DefaultCopy, double>;
static constexpr int kAlignmentB = 1;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<double>, double>{}, // CopyAtom
Layout<Shape < _8,_16>,
Stride<_16, _1>>{}, // ThrLayout for CopyAtom
Layout<Shape<_1,_1>>{})); // Value layout: 1x1 doubles
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
double, TagToStrideA_t<cutlass::layout::ColumnMajor>,
double, TagToStrideB_t<cutlass::layout::ColumnMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<cutlass::layout::ColumnMajor>,
TagToStrideC_t<cutlass::layout::ColumnMajor>,
epilogue::thread::LinearCombination<double, 1, double, double>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
// Ampere fp64 MMA NT (M-Major A and N-Major B)
template <>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassTensorOp, arch::Sm80,
double, cutlass::layout::ColumnMajor,
double, cutlass::layout::RowMajor,
double, cutlass::layout::ColumnMajor,
double>
{
using TileShape = Shape<_128, _64, _16>;
static constexpr int ThreadCount = 128;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<SM80_8x8x4_F64F64F64F64_TN>, // Atom
Layout<Shape<_2,_2,_1>>, // Atom layout
Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>, // 32x32x4 MMA with perm for load vectorization
Layout<Shape<_16,_2>,Stride<_2,_1>>,
Underscore>>;
// A (M,K) M-Major
using SmemLayoutAtomA = decltype(
composition(Swizzle<2,2,2>{},
Layout<Shape <_16, _4>,
Stride< _1,_16>>{})); // M, K
using SmemCopyAtomA = Copy_Atom<DefaultCopy, double>;
static constexpr int kAlignmentA = 2;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{}, // CopyAtom
Layout<Shape <_16, _8>,
Stride< _1,_16>>{}, // ThrLayout for CopyAtom
Layout<Shape<_2,_1>>{})); // Value layout: 2x1 doubles
// B (N,K) N-Major
using SmemLayoutAtomB = decltype(
composition(Swizzle<2,2,2>{},
Layout<Shape <_16, _4>,
Stride< _1,_16>>{})); // N, K
using SmemCopyAtomB = Copy_Atom<DefaultCopy, double>;
static constexpr int kAlignmentB = 2;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{}, // CopyAtom
Layout<Shape <_16, _8>,
Stride< _1,_16>>{}, // ThrLayout for CopyAtom
Layout<Shape<_2,_1>>{})); // Value layout: 2x1 doubles
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
double, TagToStrideA_t<cutlass::layout::ColumnMajor>,
double, TagToStrideB_t<cutlass::layout::RowMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<cutlass::layout::ColumnMajor>,
TagToStrideC_t<cutlass::layout::ColumnMajor>,
epilogue::thread::LinearCombination<double, 1, double, double>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
// Ampere fp64 MMA TT (K-Major A and N-Major B)
template <>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassTensorOp, arch::Sm80,
double, cutlass::layout::RowMajor,
double, cutlass::layout::RowMajor,
double, cutlass::layout::ColumnMajor,
double>
{
using TileShape = Shape<_128, _64, _16>;
static constexpr int ThreadCount = 128;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<SM80_8x8x4_F64F64F64F64_TN>, // Atom
Layout<Shape<_2,_2,_1>>, // Atom layout
Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>, // 32x32x4 MMA with perm for load vectorization
Layout<Shape<_16,_2>,Stride<_2,_1>>,
Underscore>>;
// A (M,K) K-Major
using SmemLayoutAtomA = decltype(
composition(Swizzle<2,0,4>{},
Layout<Shape <_4,_16>,
Stride<_1, _4>>{})); // M, K
using SmemCopyAtomA = Copy_Atom<DefaultCopy, double>;
static constexpr int kAlignmentA = 1;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<double>, double>{}, // CopyAtom
Layout<Shape < _8,_16>,
Stride<_16, _1>>{}, // ThrLayout for CopyAtom
Layout<Shape<_1,_1>>{})); // Value layout: 1x1 doubles
// B (N,K) N-Major
using SmemLayoutAtomB = decltype(
composition(Swizzle<2,2,2>{},
Layout<Shape <_16, _4>,
Stride< _1,_16>>{})); // N, K
using SmemCopyAtomB = Copy_Atom<DefaultCopy, double>;
static constexpr int kAlignmentB = 2;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{}, // CopyAtom
Layout<Shape <_16, _8>,
Stride< _1,_16>>{}, // ThrLayout for CopyAtom
Layout<Shape<_2,_1>>{})); // Value layout: 2x1 doubles
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
double, TagToStrideA_t<cutlass::layout::RowMajor>,
double, TagToStrideB_t<cutlass::layout::RowMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = epilogue::collective::DefaultEpilogue<
TagToStrideC_t<cutlass::layout::ColumnMajor>,
TagToStrideC_t<cutlass::layout::ColumnMajor>,
epilogue::thread::LinearCombination<double, 1, double, double>,
cutlass::gemm::EpilogueDefault>;
};
///////////////////////////////////////////////////////////////////////////////
// Hopper fp64 MMA TN
template <>
struct DefaultGemmConfigurationToCutlass3Types<
arch::OpClassTensorOp, arch::Sm90,
double, cutlass::layout::RowMajor,
double, cutlass::layout::ColumnMajor,
double, cutlass::layout::ColumnMajor,
double>
{
using TileShape = Shape<_128, _64, _16>;
static constexpr int ThreadCount = 128;
using DispatchPolicy = MainloopSm80CpAsync<3>;
using TiledMma = TiledMMA<
MMA_Atom<SM90_16x8x16_F64F64F64F64_TN>,
Layout<Shape<_2,_2,_1>>>;
// A (M,K) K-major
using SmemLayoutAtomA = decltype(
make_ordered_layout(Shape<_128,_16>{},
Step < _2, _1>{})); // M, K
using SmemCopyAtomA = Copy_Atom<DefaultCopy, double>;
static constexpr int kAlignmentA = 2;
using GmemTiledCopyA = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{},
Layout<Shape <_16,_8>,
Stride< _8,_1>>{},
Layout<Shape < _1,_2>>{}));
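  // Informal reading: the 16x8 thread layout combined with the 1x2 value layout means each
  // thread issues one 16-byte cp.async (two contiguous doubles along K), so a single copy
  // covers a 16x16 (M,K) block; the same shape is reused for B below.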
// B (N,K) K-major
using SmemLayoutAtomB = decltype(
make_ordered_layout(Shape<_64,_16>{},
Step < _2, _1>{})); // N, K
using SmemCopyAtomB = Copy_Atom<DefaultCopy, double>;
static constexpr int kAlignmentB = 2;
using GmemTiledCopyB = decltype(
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{},
Layout<Shape <_16,_8>,
Stride< _8,_1>>{},
Layout<Shape < _1,_2>>{}));
// Mainloop
using CollectiveMainloop = collective::CollectiveMma<
DispatchPolicy, TileShape,
double, TagToStrideA_t<cutlass::layout::RowMajor>,
double, TagToStrideB_t<cutlass::layout::ColumnMajor>,
TiledMma,
GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A
GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B
>;
// Epilogue
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
double, double,
double, cutlass::layout::ColumnMajor, 1,
double, cutlass::layout::ColumnMajor, 1,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
};
///////////////////////////////////////////////////////////////////////////////
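//
// Usage sketch (informal): a specialization above is consumed by composing its collective
// types into a CUTLASS 3.x kernel. The snippet below is illustrative only; `Config`, the
// problem extents, and the pointer/stride values are placeholders that a test harness
// would supply.
//
//   using Config = DefaultGemmConfigurationToCutlass3Types<
//       arch::OpClassTensorOp, arch::Sm80,
//       double, cutlass::layout::RowMajor,
//       double, cutlass::layout::ColumnMajor,
//       double, cutlass::layout::ColumnMajor,
//       double>;
//
//   using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
//       cute::Shape<int,int,int,int>,
//       typename Config::CollectiveMainloop,
//       typename Config::CollectiveEpilogue>;
//
//   using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
//   // ...build Gemm::Arguments with the problem shape {M,N,K,L}, the A/B/C/D pointers and
//   // strides, then call gemm.can_implement(args), gemm.initialize(args, workspace),
//   // and gemm.run().
//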
} // namespace device
} // namespace gemm
} // namespace cutlass
| test/unit/gemm/device/default_gemm_configuration.hpp/0 | {
"file_path": "test/unit/gemm/device/default_gemm_configuration.hpp",
"repo_id": "test",
"token_count": 23557
} | 52 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface with stream-K scheduling
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x1x1) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_1,_1,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 8,
ElementB, LayoutB, 8,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
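// Informal sketch of what the TestAll harness exercises for a kernel like the one above.
// All names and values here (M, N, K, L, pointers, strides, workspace_ptr) are
// placeholders, not the harness's actual internals:
//
//   Gemm gemm;
//   typename Gemm::Arguments args{
//     cutlass::gemm::GemmUniversalMode::kGemm,
//     {M, N, K, L},                                      // problem shape
//     {ptr_A, stride_A, ptr_B, stride_B},                // mainloop arguments
//     {{alpha, beta}, ptr_C, stride_C, ptr_D, stride_D}  // epilogue arguments
//   };
//   // With the StreamKScheduler, decomposition knobs (splits, decomposition mode, ...)
//   // are exposed through args.scheduler, and stream-K requires device workspace sized
//   // via Gemm::get_workspace_size(args) for its partial accumulators.
//   if (gemm.can_implement(args) == cutlass::Status::kSuccess) {
//     gemm.initialize(args, workspace_ptr);
//     gemm.run();
//   }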
TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_1x2x1) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_1,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 8,
ElementB, LayoutB, 8,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
/////////////////////////////// Cluster 2x2x1 ////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_2x2x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x2x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_2x2x1) {
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x2x1) {
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
  using ClusterShape_MNK = Shape<_2,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
/////////////////////////////// Cluster 4x1x1 ////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_4,_1,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_4,_1,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) {
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_4,_1,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) {
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_4,_1,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
/////////////////////////////// Cluster 1x4x1 ////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_1,_4,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_1,_4,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) {
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_1,_4,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) {
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_1,_4,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
/////////////////////////////// Cluster 2x4x1 ////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_4,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_4,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) {
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_4,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) {
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_4,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::TmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
TEST(SM90_Device_Gemm_f16t_f16n_f16t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
cutlass::epilogue::TmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 128x128x64_2x2x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::TmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 128x128x64_2x2x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::TmaWarpSpecializedCooperative
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0));
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1_BiasF32_ReLU) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux<
LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>;
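  // Informal note: this fusion computes, per output tile, roughly
  //   Z = alpha * acc + beta * C + per-row bias,  D = ReLU(Z),
  // with the auxiliary tensor receiving Z (the pre-activation term, as typically saved for
  // a backward pass). TestAllBiasElementwise below supplies the bias and aux tensors.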
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
cutlass::gemm::StreamKScheduler
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>(1.0, 0.0));
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>(1.0, 1.0));
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cooperative_stream_k.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cooperative_stream_k.cu",
"repo_id": "test",
"token_count": 15914
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "../../common/cutlass_unit_test.h"
#include "cutlass/core_io.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/gemm/kernel/default_gemv.h"
#include "cutlass/gemm/kernel/gemv_batched_strided.h"
namespace test {
namespace gemm {
namespace kernel {
template<typename ThreadBlockShape_,
typename ThreadShape_,
typename ElementAB_,
typename ElementAccumulator_,
typename ElementCD_,
typename LayoutA_,
typename LayoutB_,
typename LayoutCD_,
int THREAD_B = 1, // batch tile size
bool DEBUG=false>
void batched_gemv_kernel_test(cutlass::gemm::BatchedGemmCoord problem_size,
ElementCD_ alpha = ElementCD_(1),
ElementCD_ beta = ElementCD_(0),
bool perf_test = false,
int perf_test_iter = 1)
{
using ThreadBlockShape = ThreadBlockShape_;
using ThreadShape = ThreadShape_;
using ElementA = ElementAB_;
using LayoutA = LayoutA_;
using ElementB = ElementAB_;
using LayoutB = LayoutB_;
using ElementAccumulator = ElementCD_;
using ElementCD = ElementCD_;
using LayoutCD = LayoutCD_;
using GemvKernel = cutlass::gemm::kernel::DefaultGemv<ThreadBlockShape,
ThreadShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementCD,
LayoutCD,
ElementAccumulator>;
using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv;
using ThreadBlockSwizzle = typename GemvKernel::ThreadBlockSwizzle;
if (DEBUG)
{
problem_size = cutlass::gemm::BatchedGemmCoord(
problem_size.m(), problem_size.n(), problem_size.k(), 1);
}
// Create host tensors that will be the backing store for the batches
// Note that no device memory is initially allocated
cutlass::HostTensor<ElementA, LayoutA> matrix_A({problem_size.m(), problem_size.k()}, false);
cutlass::HostTensor<ElementB, LayoutB> matrix_B({problem_size.k(), problem_size.n()}, false);
cutlass::HostTensor<ElementCD, LayoutCD> matrix_C_computed({problem_size.m(), problem_size.n()}, false);
cutlass::HostTensor<ElementCD, LayoutCD> matrix_C_reference({problem_size.m(), problem_size.n()}, false);
// Reserve memory for the batch of tensors
matrix_A.reserve(problem_size.m()*problem_size.k()*problem_size.batch());
matrix_B.reserve(problem_size.n()*problem_size.k()*problem_size.batch());
matrix_C_computed.reserve(problem_size.m()*problem_size.n()*problem_size.batch());
matrix_C_reference.reserve(problem_size.m()*problem_size.n()*problem_size.batch(), false);
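// Each tensor's batch b starts at element offset b * capacity() within the
// single reservation above; the per-batch host views and device references
// below are indexed with that stride.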
// Fill each tensor batch
const int seed = 9876;
for (int b = 0; b < problem_size.batch(); b++)
{
if(DEBUG)
{
cutlass::reference::host::BlockFillSequential(
matrix_A.host_data_ptr_offset(b*matrix_A.capacity()), matrix_A.capacity());
cutlass::reference::host::BlockFillSequential(
matrix_B.host_data_ptr_offset(b*matrix_B.capacity()), matrix_B.capacity());
}
else
{
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(b*matrix_A.capacity()),
seed + 1660,
8,
-8,
0
);
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(b*matrix_B.capacity()),
seed + 1880,
8,
-8,
0
);
}
cutlass::reference::host::TensorFill(matrix_C_computed.host_view(b*matrix_C_computed.capacity()));
cutlass::reference::host::TensorFill(matrix_C_reference.host_view(b*matrix_C_reference.capacity()));
}
matrix_A.sync_device();
matrix_B.sync_device();
matrix_C_computed.sync_device();
ThreadBlockSwizzle swizzle;
cutlass::gemm::BatchedGemmCoord tiled_size{ThreadBlockShape::kM,
ThreadBlockShape::kN,
problem_size.k(), // no split-k
DEBUG ? 1 : THREAD_B };
cutlass::gemm::BatchedGemmCoord tiled_shape = swizzle.get_tiled_shape(problem_size, tiled_size);
#if 0
printf("tiled_size = %d %d %d %d\n", tiled_size.m(), tiled_size.n(), tiled_size.k(), tiled_size.batch());
printf("tiled_shape = %d %d %d %d\n", tiled_shape.m(), tiled_shape.n(), tiled_shape.k(), tiled_shape.batch());
#endif
// No split-k
EXPECT_EQ(tiled_size.k(), problem_size.k());
dim3 grid = swizzle.get_grid_shape(tiled_shape);
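// Launch configuration (a sketch of the mapping): block.x covers the N extent
// of one threadblock tile in units of ThreadShape::kN, block.y covers the
// batch tile (THREAD_B batches per threadblock), and block.z covers split-k
// slices, which is always 1 here because tiled_size.k() == problem_size.k().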
dim3 block(tiled_size.n() / ThreadShape::kN, tiled_size.batch(), tiled_size.k() / problem_size.k());
// Some sanity checks
EXPECT_TRUE( block.x*block.y*block.z <= 1024 );
EXPECT_TRUE( block.x <= 1024 );
EXPECT_TRUE( block.y <= 1024 );
EXPECT_TRUE( block.z <= 64 );
#if 0
printf("grid dim = %d, %d, %d\n", grid.x, grid.y, grid.z);
printf("block dim = %d, %d, %d\n", block.x, block.y, block.z);
#endif
cudaError_t result;
cudaEvent_t start_event, end_event;
for (int iter = 0; iter < (perf_test ? (perf_test_iter+1) : 1); ++iter)
{
if (perf_test && iter == 1)
{
result = cudaEventCreate(&start_event);
EXPECT_EQ(result, cudaSuccess);
result = cudaEventCreate(&end_event);
EXPECT_EQ(result, cudaSuccess);
result = cudaEventRecord(start_event);
EXPECT_EQ(result, cudaSuccess);
}
if (beta == ElementCD(0))
{
if (alpha == ElementCD(1))
{
cutlass::gemm::kernel::GemvBatchedStrided<GemvKernel><<< grid, block >>>(
problem_size,
matrix_A.device_ref(),
matrix_A.capacity(),
matrix_B.device_ref(),
matrix_B.capacity(),
matrix_C_computed.device_ref(),
matrix_C_computed.capacity()
);
}
else
{
cutlass::gemm::kernel::GemvBatchedStrided<GemvKernel><<< grid, block >>>(
problem_size,
alpha,
matrix_A.device_ref(),
matrix_A.capacity(),
matrix_B.device_ref(),
matrix_B.capacity(),
matrix_C_computed.device_ref(),
matrix_C_computed.capacity()
);
}
}
else
{
cutlass::gemm::kernel::GemvBatchedStrided<GemvKernel, ElementCD, false><<< grid, block >>>(
problem_size,
alpha,
beta,
matrix_A.device_ref(),
matrix_A.capacity(),
matrix_B.device_ref(),
matrix_B.capacity(),
matrix_C_computed.device_ref(),
matrix_C_computed.capacity(),
matrix_C_computed.device_ref(),
matrix_C_computed.capacity()
);
}
if (iter == 0)
{
result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result);
}
}
if (perf_test)
{
result = cudaEventRecord(end_event);
EXPECT_EQ(result, cudaSuccess);
}
result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result);
if (perf_test)
{
float ms;
result = cudaEventElapsedTime(&ms, start_event, end_event);
EXPECT_EQ(result, cudaSuccess);
double flops = (double(problem_size.m()) *
double(problem_size.n()) *
double(problem_size.k()) *
double(problem_size.batch()) * 2); // 2 for MAC
double read_bytes = double(problem_size.batch()) * (sizeof(ElementA)*double(problem_size.m())*double(problem_size.k()) +
sizeof(ElementB)*double(problem_size.k())*double(problem_size.n()));
double write_bytes = double(problem_size.batch()) * (sizeof(ElementCD)*double(problem_size.m())*double(problem_size.n()));
double avg_runtime = double(ms) / perf_test_iter;
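// avg_runtime is in milliseconds, so dividing by (avg_runtime * 1.0e6) below
// converts FLOPs to GFLOP/s and bytes to GB/s.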
double gflops_per_sec = flops / 1.0e6 / avg_runtime;
double read_bandwidth = read_bytes / 1.0e6 / avg_runtime;
double write_bandwidth = write_bytes / 1.0e6 / avg_runtime;
std::cout << "\n\nProblem size: "
<< problem_size.m()
<< " x " << problem_size.n()
<< " x " << problem_size.k()
<< " x " << problem_size.batch()
<< std::endl;
std::cout << " GFLOPs: " << gflops_per_sec << std::endl;
std::cout << "BW (R/W): " << read_bandwidth << " / " << write_bandwidth << " GB/sec" << std::endl;
std::cout << " Runtime: " << avg_runtime << " ms" << std::endl;
}
else
{
matrix_C_computed.sync_host();
// Compute the batched gemms
for (int b = 0; b < problem_size.batch(); b++)
{
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementCD, LayoutCD, ElementCD,
ElementCD>
reference_gemm;
reference_gemm(
problem_size.mnk(), alpha,
matrix_A.host_ref(b * matrix_A.capacity()),
matrix_B.host_ref(b * matrix_B.capacity()), beta,
matrix_C_reference.host_ref(b * matrix_C_reference.capacity()));
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed.host_view(b * matrix_C_computed.capacity()),
matrix_C_reference.host_view(b * matrix_C_reference.capacity()));
EXPECT_TRUE(passed)
//<< "A:\n" << matrix_A.host_view() << "\n"
//<< "B:\n" << matrix_B.host_view() << "\n"
<< "Batch: " << b << "\n"
<< "Reference:\n"
<< matrix_C_reference.host_view(b * matrix_C_reference.capacity())
<< "\n"
<< "Computed:\n"
<< matrix_C_computed.host_view(b * matrix_C_computed.capacity())
<< "\n";
}
}
}
template<typename ThreadBlockShape_,
typename ThreadShape_,
typename ElementAB_,
typename ElementAccumulator_,
typename ElementCD_,
typename LayoutA_,
typename LayoutB_,
typename LayoutCD_,
int THREAD_B = 1, // batch tile size
bool DEBUG=false>
void batched_gemv_kernel_perf_test(cutlass::gemm::BatchedGemmCoord problem_size,
ElementCD_ alpha = ElementCD_(1),
ElementCD_ beta = ElementCD_(0),
int iter = 50)
{
batched_gemv_kernel_test<ThreadBlockShape_,
ThreadShape_,
ElementAB_,
ElementAccumulator_,
ElementCD_,
LayoutA_,
LayoutB_,
LayoutCD_,
THREAD_B,
DEBUG>(problem_size, alpha, beta, true, iter);
}
} // namespace kernel
} // namespace gemm
} // namespace test
| test/unit/gemm/kernel/testbed_gemv.h/0 | {
"file_path": "test/unit/gemm/kernel/testbed_gemv.h",
"repo_id": "test",
"token_count": 7083
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "cutlass/cutlass.h"
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/gemm/warp/default_mma_complex_tensor_op.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "testbed.h"
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
////////////////////////////////////////////////////////////////////////////////////////////////////
// complex<double> * complex<double> => complex<double>
// Input data type: complex<double>
// Math instruction: mma.sync.aligned.m8n8k4.f64.f64.f64.f64
// Output data type: complex<double>
///////////////////////////////////////////////////////////////////////////////////////////////////
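// Implementation note (a sketch of the math, not of the exact instruction
// sequence): the complex tensor-op MMA exercised below decomposes each
// complex multiply-accumulate into real-valued mma.sync operations on the
// real and imaginary planes,
//
//   C.real += A.real * B.real - A.imag * B.imag
//   C.imag += A.real * B.imag + A.imag * B.real
//
// so one complex MMA costs four real MMAs; the kConjugate variants further
// down negate the appropriate imaginary-part terms.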
TEST(SM80_warp_gemm_complex_tensor_op_f64, 8x8x4_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<8, 8, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<8, 8, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x16x4_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<16, 16, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x32x4_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<16, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<16, 32, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x16x4_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<32, 16, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 16, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_nh) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
cutlass::ComplexTransform::kConjugate
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_ct) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kConjugate,
cutlass::ComplexTransform::kNone
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 8x8x4_8x8x4_tn) {
using Shape = cutlass::gemm::GemmShape<8, 8, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<8, 8, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x16x4_8x8x4_tn) {
using Shape = cutlass::gemm::GemmShape<16, 16, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 4> >().run();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// complex<float> * complex<float> => complex<float>
// Input data type: complex<float>
// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
// Output data type: complex<float>
// Shared memory layout: Congruous
////////////////////////////////////////////////////////////////////////////////////////////////////
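// These complex<float> cases use TransformedTestbedComplex rather than
// TestbedComplex: it applies the warp MMA's fragment transform before the
// multiply-accumulate, which for this tf32 instruction appears to be where the
// float operands are rounded to tf32, mirroring the device-level mainloop.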
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x8_16x8x8_nt) {
using Shape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x16_16x8x8_nt) {
using Shape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x32x8_16x8x8_nt) {
using Shape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<16, 32, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x16x8_16x8x8_nt) {
using Shape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 16, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_nt) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_nh) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
cutlass::ComplexTransform::kConjugate
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_ct) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kConjugate,
cutlass::ComplexTransform::kNone
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// complex<float> * complex<float> => complex<float>
// Input data type: complex<float>
// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
// Output data type: complex<float>
// Shared memory layout: Crosswise
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x8_16x8x8_tn) {
using Shape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 8> >()
.run();
}
// TEST FAILS: the crosswise complex<float> TN mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 case fails for k = 2*8 = 16
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x16_16x8x8_tn) {
using Shape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_tn) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x64x8_16x8x8_tn) {
using Shape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 64, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 64x32x8_16x8x8_tn) {
using Shape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<64, 32, 8> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x8_8x8x4_tn) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x8_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////////////////////
#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
| test/unit/gemm/warp/gemm_complex_sm80.cu/0 | {
"file_path": "test/unit/gemm/warp/gemm_complex_sm80.cu",
"repo_id": "test",
"token_count": 8445
} | 55 |