/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <cstdint>
#include <cmath>
#include <type_traits>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/uint128.h"
#include "cutlass/coord.h"
#include "cutlass/half.h"
/**
* \file
* \brief Math utilities
*/
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
CUTLASS_HOST_DEVICE void swap(T &lhs, T &rhs) {
T tmp = lhs;
lhs = rhs;
rhs = tmp;
}
/******************************************************************************
* Static math utilities
******************************************************************************/
/// Mixed precision dot product
template <typename Index, typename LongIndex, int N>
CUTLASS_HOST_DEVICE LongIndex dot(
Coord<N, Index> const &coord,
Coord<N, LongIndex> const &stride,
LongIndex acc = LongIndex()) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < N; ++n) {
acc += LongIndex(coord[n]) * stride[n];
}
return acc;
}
/**
* Statically determine if N is a power-of-two
*/
template <int N>
struct is_pow2 {
static bool const value = ((N & (N - 1)) == 0);
};
/**
* Statically determine log2(N), rounded down
*/
template <int N, int CurrentVal = N, int Count = 0>
struct log2_down {
/// Static logarithm value
enum { value = log2_down<N, (CurrentVal >> 1), Count + 1>::value };
};
// Base case
template <int N, int Count>
struct log2_down<N, 1, Count> {
enum { value = Count };
};
/**
* Statically determine log2(N), rounded up
*/
template <int N, int CurrentVal = N, int Count = 0>
struct log2_up {
/// Static logarithm value
enum { value = log2_up<N, (CurrentVal >> 1), Count + 1>::value };
};
// Base case
template <int N, int Count>
struct log2_up<N, 1, Count> {
enum { value = ((1 << Count) < N) ? Count + 1 : Count };
};
/**
* Statically estimate sqrt(N) to the nearest power-of-two
*/
template <int N>
struct sqrt_est {
enum { value = 1 << (log2_up<N>::value / 2) };
};
/**
* For performing a constant-division with a compile-time assertion that the
* Divisor evenly-divides the Dividend.
*/
template <int Dividend, int Divisor>
struct divide_assert {
enum { value = Dividend / Divisor };
static_assert((Dividend % Divisor == 0), "Not an even multiple");
};
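//
// Illustrative compile-time values for the static helpers above (a sketch, not exhaustive):
//
//   is_pow2<8>::value           == true
//   log2_down<9>::value         == 3
//   log2_up<9>::value           == 4
//   sqrt_est<16>::value         == 4
//   divide_assert<12, 4>::value == 3
//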
/******************************************************************************
* Rounding
******************************************************************************/
/**
* Round dividend up to the nearest multiple of divisor
*/
template <typename dividend_t, typename divisor_t>
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
dividend_t round_nearest(dividend_t dividend, divisor_t divisor) {
return ((dividend + divisor - 1) / divisor) * divisor;
}
template <typename value_t>
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
value_t abs_for_integer(value_t a) {
return ((a > 0) ? a : -a);
}
/**
* Greatest common divisor
*/
template <typename value_t>
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
value_t gcd(value_t a, value_t b) {
for (;;) {
if (a == 0) return cutlass::abs_for_integer(b);
b %= a;
if (b == 0) return cutlass::abs_for_integer(a);
a %= b;
}
}
/**
* Least common multiple
*/
template <typename value_t>
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
value_t lcm(value_t a, value_t b) {
value_t temp = cutlass::gcd(a, b);
return (temp != 0) ? value_t(cutlass::abs_for_integer(a) / temp * cutlass::abs_for_integer(b)) : value_t{};
}
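//
// Illustrative values (sketch): round_nearest(13, 4) == 16, gcd(12, 18) == 6,
// and lcm(12, 18) == 36.
//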
/**
* Greatest common divisor
*/
template <typename value_t>
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
value_t gcd_cxx11(value_t a, value_t b) {
return (a == 0 || b == 0) ? cutlass::abs_for_integer(a | b) : cutlass::gcd_cxx11(b, a % b);
}
/**
* Least common multiple
*/
template <typename value_t>
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
value_t lcm_cxx11(value_t a, value_t b) {
return cutlass::gcd_cxx11(a, b) ? (cutlass::abs_for_integer(a) / cutlass::gcd_cxx11(a, b) *
cutlass::abs_for_integer(b))
: value_t{};
}
/// Returns the smallest value in the half-open range [a, a+b) that is a multiple of b
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
int round_up(int a, int b) {
return ((a + b - 1) / b) * b;
}
/// Returns the ceiling of (a / b)
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
int ceil_div(int a, int b) {
return (a + b - 1) / b;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Run-time log2 helpers (clz / find_log2). Unlike the compile-time
 * log2_up/log2_down templates above, these operate on run-time values.
 */
template <typename value_t>
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
value_t clz(value_t x) {
for (int i = 31; i >= 0; --i) {
if ((1 << i) & x)
return value_t(31 - i);
}
return value_t(32);
}
template <typename value_t>
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
value_t find_log2(value_t x) {
int a = int(31 - clz(x));
a += (x & (x - 1)) != 0; // Round up, add 1 if not a power of 2.
return a;
}
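//
// Illustrative values (sketch): clz(1) == 31, clz(16) == 27,
// find_log2(8) == 3, and find_log2(9) == 4 (rounded up for non-powers-of-two).
//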
/**
* Find divisor, using find_log2
*/
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
void find_divisor(unsigned int& mul, unsigned int& shr, unsigned int denom) {
if (denom == 1) {
mul = 0;
shr = 0;
} else {
unsigned int p = 31 + find_log2(denom);
unsigned m = unsigned(((1ull << p) + unsigned(denom) - 1) / unsigned(denom));
mul = m;
shr = p - 32;
}
}
/**
* Find quotient and remainder using device-side intrinsics
*/
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
void fast_divmod(int& quo, int& rem, int src, int div, unsigned int mul, unsigned int shr) {
#if defined(__CUDA_ARCH__)
// Use IMUL.HI if div != 1, else simply copy the source.
quo = (div != 1) ? __umulhi(src, mul) >> shr : src;
#else
quo = int((div != 1) ? int(((int64_t)src * mul) >> 32) >> shr : src);
#endif
// The remainder.
rem = src - (quo * div);
}
// For long int input
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17
void fast_divmod(int& quo, int64_t& rem, int64_t src, int div, unsigned int mul, unsigned int shr) {
#if defined(__CUDA_ARCH__)
// Use IMUL.HI if div != 1, else simply copy the source.
quo = (div != 1) ? __umulhi(src, mul) >> shr : src;
#else
quo = int((div != 1) ? ((src * mul) >> 32) >> shr : src);
#endif
// The remainder.
rem = src - (quo * div);
}
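//
// Usage sketch (illustrative): precompute the magic multiplier/shift once, then replace
// repeated integer division by a multiply-high and shift.
//
//   unsigned int mul, shr;
//   find_divisor(mul, shr, /*denom=*/3);
//
//   int quo, rem;
//   fast_divmod(quo, rem, /*src=*/10, /*div=*/3, mul, shr);   // quo == 3, rem == 1
//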
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Object to encapsulate the fast division+modulus operation.
///
/// This object precomputes two values used to accelerate the computation and is best used
/// when the divisor is a grid-invariant. In this case, it may be computed in host code and
/// marshalled alongside other kernel arguments using the 'Params' pattern.
///
/// Example:
///
///
/// int quotient, remainder, dividend, divisor;
///
/// FastDivmod divmod(divisor);
///
/// divmod(quotient, remainder, dividend);
///
/// // quotient = (dividend / divisor)
/// // remainder = (dividend % divisor)
///
struct FastDivmod {
int divisor;
unsigned int multiplier;
unsigned int shift_right;
/// Find quotient and remainder using device-side intrinsics
CUTLASS_HOST_DEVICE
void fast_divmod(int& quotient, int& remainder, int dividend) const {
#if defined(__CUDA_ARCH__)
// Use IMUL.HI if divisor != 1, else simply copy the source.
quotient = (divisor != 1) ? __umulhi(dividend, multiplier) >> shift_right : dividend;
#else
quotient = int((divisor != 1) ? int(((int64_t)dividend * multiplier) >> 32) >> shift_right : dividend);
#endif
// The remainder.
remainder = dividend - (quotient * divisor);
}
/// For long int input
CUTLASS_HOST_DEVICE
void fast_divmod(int& quotient, int64_t& remainder, int64_t dividend) const {
#if defined(__CUDA_ARCH__)
// Use IMUL.HI if divisor != 1, else simply copy the source.
quotient = (divisor != 1) ? __umulhi(dividend, multiplier) >> shift_right : dividend;
#else
quotient = int((divisor != 1) ? ((dividend * multiplier) >> 32) >> shift_right : dividend);
#endif
// The remainder.
remainder = dividend - (quotient * divisor);
}
/// Construct the FastDivmod object, in host code ideally.
///
/// This precomputes some values based on the divisor and is computationally expensive.
CUTLASS_HOST_DEVICE
FastDivmod(): divisor(0), multiplier(0), shift_right(0) { }
CUTLASS_HOST_DEVICE
FastDivmod(int divisor): divisor(divisor) {
if (divisor != 1) {
unsigned int p = 31 + find_log2(divisor);
unsigned m = unsigned(((1ull << p) + unsigned(divisor) - 1) / unsigned(divisor));
multiplier = m;
shift_right = p - 32;
} else {
multiplier = 0;
shift_right = 0;
}
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
void operator()(int &quotient, int &remainder, int dividend) const {
fast_divmod(quotient, remainder, dividend);
}
/// Computes integer division using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
int div(int dividend) const {
int quotient, remainder;
fast_divmod(quotient, remainder, dividend);
return quotient;
}
/// Alias for `div` to match the interface of FastDivmodU64
CUTLASS_HOST_DEVICE
int divide(int dividend) const {
return div(dividend);
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
///
/// Simply returns the quotient
CUTLASS_HOST_DEVICE
int divmod(int &remainder, int dividend) const {
int quotient;
fast_divmod(quotient, remainder, dividend);
return quotient;
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
void operator()(int &quotient, int64_t &remainder, int64_t dividend) const {
fast_divmod(quotient, remainder, dividend);
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
int divmod(int64_t &remainder, int64_t dividend) const {
int quotient;
fast_divmod(quotient, remainder, dividend);
return quotient;
}
/// Returns the divisor when cast to integer
CUTLASS_HOST_DEVICE
operator int() const { return divisor; }
};
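//
// Usage sketch (illustrative; C and npqc are placeholder names): the divmod() member
// returns the quotient and writes the remainder, which is convenient for peeling
// coordinates off a linear index.
//
//   FastDivmod divmod_c(C);
//   int c;
//   int npq = divmod_c.divmod(c, npqc);   // npq = npqc / C, c = npqc % C
//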
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Object to encapsulate the fast division+modulus operation for 64b integer division.
///
/// This object precomputes two values used to accelerate the computation and is best used
/// when the divisor is a grid-invariant. In this case, it may be computed in host code and
/// marshalled alongside other kernel arguments using the 'Params' pattern.
///
/// Example:
///
///
/// uint64_t quotient, remainder, dividend, divisor;
///
/// FastDivmodU64 divmod(divisor);
///
/// divmod(quotient, remainder, dividend);
///
/// // quotient = (dividend / divisor)
/// // remainder = (dividend % divisor)
///
struct FastDivmodU64 {
uint64_t divisor;
uint64_t multiplier;
unsigned int shift_right;
unsigned int round_up;
//
// Static methods
//
/// Computes b, where 2^b is the greatest power of two that is less than or equal to x
CUTLASS_HOST_DEVICE
static uint32_t integer_log2(uint64_t x) {
uint32_t n = 0;
while (x >>= 1) {
++n;
}
return n;
}
/// Default ctor
CUTLASS_HOST_DEVICE
FastDivmodU64(): divisor(0), multiplier(0), shift_right(0), round_up(0) { }
/// Construct the FastDivmod object, in host code ideally.
///
/// This precomputes some values based on the divisor and is computationally expensive.
CUTLASS_HOST_DEVICE
FastDivmodU64(uint64_t divisor_): divisor(divisor_), multiplier(1), shift_right(0), round_up(0) {
if (divisor) {
shift_right = integer_log2(divisor);
if ((divisor & (divisor - 1)) == 0) {
multiplier = 0;
}
else {
uint64_t power_of_two = (uint64_t(1) << shift_right);
uint64_t multiplier_lo = uint128_t(0, power_of_two) / divisor;
multiplier = uint128_t(power_of_two, power_of_two) / divisor;
round_up = (multiplier_lo == multiplier ? 1 : 0);
}
}
}
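  // Intuition (a sketch of the technique): for a non-power-of-two divisor d with
  // k = floor(log2(d)), the multiplier approximates 2^(64 + k) / d, so divide() can
  // compute floor(x / d) as (high 64 bits of x * multiplier) >> k. When the multiplier
  // had to be rounded down, round_up == 1 nudges the dividend by one instead.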
/// Returns the quotient of floor(dividend / divisor)
CUTLASS_HOST_DEVICE
uint64_t divide(uint64_t dividend) const {
uint64_t quotient = 0;
#ifdef __CUDA_ARCH__
uint64_t x = dividend;
if (multiplier) {
x = __umul64hi(dividend + round_up, multiplier);
}
quotient = (x >> shift_right);
#else
quotient = dividend / divisor;
#endif
return quotient;
}
/// Computes the remainder given a computed quotient and dividend
CUTLASS_HOST_DEVICE
uint64_t modulus(uint64_t quotient, uint64_t dividend) const {
return uint32_t(dividend - quotient * divisor);
}
/// Returns the quotient of floor(dividend / divisor) and computes the remainder
CUTLASS_HOST_DEVICE
uint64_t divmod(uint64_t &remainder, uint64_t dividend) const {
uint64_t quotient = divide(dividend);
remainder = modulus(quotient, dividend);
return quotient;
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
void operator()(uint64_t &quotient, uint64_t &remainder, uint64_t dividend) const {
quotient = divmod(remainder, dividend);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Object to encapsulate the fast division+modulus operation for 64b integer division
/// in which the divisor is a power of two.
struct FastDivmodU64Pow2 {
uint64_t divisor;
unsigned int shift_right;
/// Default ctor
CUTLASS_HOST_DEVICE
FastDivmodU64Pow2(): divisor(0), shift_right(0) { }
/// Construct the FastDivmod object, in host code ideally.
///
/// This precomputes some values based on the divisor and is computationally expensive.
CUTLASS_HOST_DEVICE
FastDivmodU64Pow2(uint64_t divisor_): divisor(divisor_), shift_right(FastDivmodU64::integer_log2(divisor_)) { }
/// Returns the quotient of floor(dividend / divisor)
CUTLASS_HOST_DEVICE
uint64_t divide(uint64_t dividend) const {
return dividend >> shift_right;
}
/// Computes the remainder given a computed quotient and dividend
CUTLASS_HOST_DEVICE
uint64_t modulus(uint64_t dividend) const {
// See https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#division-modulo-operations
return dividend & (divisor - 1);
}
/// Returns the quotient of floor(dividend / divisor) and computes the remainder
CUTLASS_HOST_DEVICE
uint64_t divmod(uint64_t &remainder, uint64_t dividend) const {
uint64_t quotient = divide(dividend);
remainder = modulus(dividend);
return quotient;
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
void operator()(uint64_t &quotient, uint64_t &remainder, uint64_t dividend) const {
quotient = divmod(remainder, dividend);
}
};
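//
// Usage sketch (illustrative): for a power-of-two divisor, quotient and remainder
// reduce to a shift and a mask.
//
//   FastDivmodU64Pow2 divmod(256);
//   uint64_t rem;
//   uint64_t quo = divmod.divmod(rem, 1000);   // quo == 3, rem == 232
//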
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes the coordinate decomposition from a linear index (64-bit linear index => coord<int32_t>)
///
/// This decomposition is accelerated by the FastDivmodU64 object. It is assumed that
/// a coordinate of <Rank> indices can be decomposed by <Rank - 1> div/mod operations.
/// Note, it is assumed that element divmod[0] divides by extent[1].
///
/// For example, assume 4-D coordinate (n, p, q, c) is mapped to a linear index `npqc`. This
/// can be decomposed via three divide and modulus operations:
///
/// c = npqc % C; | divmod[2] = FastDivmodU64(C)
/// npq = npqc / C; | coord[3] = c
///
/// q = npq % Q; | divmod[1] = FastDivmodU64(Q)
/// np = npq / Q; | coord[2] = q
///
/// p = np % P; | divmod[0] = FastDivmodU64(P)
/// n = np / P; | coord[1] = p
///
/// | coord[0] = n
///
template <int Rank>
CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecomposition(
uint64_t linear_idx, ///< Linear index to decompose
FastDivmodU64 const *divmod) { ///< Pointer to array of Rank-1 FastDivmodU64 objects
static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater.");
Coord<Rank> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = Rank; i > 1; --i) {
uint64_t remainder;
linear_idx = divmod[i - 2].divmod(remainder, linear_idx);
coord[i - 1] = int(remainder);
}
coord[0] = int(linear_idx);
return coord;
}
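//
// Usage sketch for CoordinateDecomposition (illustrative; N, P, Q, C and linear_idx are
// placeholder names): divmod[0] divides by P, divmod[1] by Q, divmod[2] by C.
//
//   FastDivmodU64 divmod[3] = { FastDivmodU64(P), FastDivmodU64(Q), FastDivmodU64(C) };
//   Coord<4> coord = CoordinateDecomposition<4>(linear_idx, divmod);
//   // coord == (n, p, q, c)
//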
/// Computes the coordinate decomposition from a linear index (32-bit linear index => coord<int32_t>)
template <int Rank>
CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecomposition(
int linear_idx, ///< Linear index to decompose
FastDivmod const *divmod) { ///< Pointer to array of Rank-1 FastDivmod objects
static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater.");
Coord<Rank> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = Rank; i > 1; --i) {
int remainder;
linear_idx = divmod[i - 2].divmod(remainder, linear_idx);
coord[i - 1] = int(remainder);
}
coord[0] = int(linear_idx);
return coord;
}
template <int Rank>
CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecompositionLittleEndian(
uint64_t linear_idx, ///< Linear index to decompose
FastDivmodU64 const *divmod) { ///< Pointer to array of Rank-1 FastDivmodU64 objects
static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater.");
Coord<Rank> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank - 1; ++i) {
uint64_t remainder;
linear_idx = divmod[i].divmod(remainder, linear_idx);
coord[i] = int(remainder);
}
coord[Rank - 1] = int(linear_idx);
return coord;
}
/// Computes the coordinate decomposition from a linear index (32-bit linear index => coord<int32_t>)
template <int Rank>
CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecompositionLittleEndian(
int linear_idx, ///< Linear index to decompose
FastDivmod const *divmod) { ///< Pointer to array of Rank-1 FastDivmod objects
static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater.");
Coord<Rank> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank - 1; ++i) {
int remainder;
linear_idx = divmod[i].divmod(remainder, linear_idx);
coord[i] = int(remainder);
}
coord[Rank - 1] = int(linear_idx);
return coord;
}
/// Safely computes the offset of a linear index in bytes for all types
template <typename Element>
CUTLASS_HOST_DEVICE int64_t OffsetBytes(int64_t index) {
static_assert(
(sizeof_bits<Element>::value >= 8 && !(sizeof_bits<Element>::value % 8)) ||
(sizeof_bits<Element>::value < 8 && !(8 % sizeof_bits<Element>::value)),
"Size of numeric type in bits must either be divisible by 8 bits, or 8 bits must be divisible by the size.");
if (sizeof_bits<Element>::value >= 8) {
return index * (sizeof_bits<Element>::value / 8);
}
else {
int const kElementsPerByte = ((8 / sizeof_bits<Element>::value) + ((sizeof_bits<Element>::value >= 8) ? 1 : 0));
return index / kElementsPerByte;
}
}
CUTLASS_HOST_DEVICE int64_t OffsetBytes(int64_t index, int64_t element_sizeof_bits) {
if (element_sizeof_bits >= 8) {
return index * (element_sizeof_bits / 8);
}
else {
int64_t const kElementsPerByte = ((8 / element_sizeof_bits) + ((element_sizeof_bits >= 8) ? 1 : 0));
return index / kElementsPerByte;
}
}
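//
// Illustrative values (sketch): OffsetBytes<half_t>(10) == 20 (16-bit elements), while
// OffsetBytes<int4b_t>(10) == 5, since two 4-bit elements pack into each byte.
//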
/////////////////////////////////////////////////////////////////////////////////////////////////
// Min/Max
/////////////////////////////////////////////////////////////////////////////////////////////////
template <int A, int B>
struct Min {
static int const kValue = (A < B) ? A : B;
};
template <int A, int B>
struct Max {
static int const kValue = (A > B) ? A : B;
};
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17 int const_min(int a, int b) {
return (b < a ? b : a);
}
CUTLASS_HOST_DEVICE
CUTLASS_CONSTEXPR_IF_CXX17 int const_max(int a, int b) {
return (b > a ? b : a);
}
template <typename T>
CUTLASS_HOST_DEVICE
T fast_min(T a, T b) {
return (b < a ? b : a);
}
template <>
CUTLASS_HOST_DEVICE
float fast_min(float a, float b) {
return fminf(a, b);
}
template <typename T>
CUTLASS_HOST_DEVICE
T fast_max(T a, T b) {
return (a < b ? b : a);
}
template <>
CUTLASS_HOST_DEVICE
float fast_max(float a, float b) {
return fmaxf(a, b);
}
CUTLASS_HOST_DEVICE
float fast_cos(float theta) {
#if defined(__CUDA_ARCH__)
return ::cosf(theta);
#else
return std::cos(theta);
#endif
}
CUTLASS_HOST_DEVICE
double fast_cos(double theta) {
#if defined(__CUDA_ARCH__)
return ::cos(theta);
#else
return std::cos(theta);
#endif
}
CUTLASS_HOST_DEVICE
float fast_sin(float theta) {
#if defined(__CUDA_ARCH__)
return ::sinf(theta);
#else
return std::sin(theta);
#endif
}
CUTLASS_HOST_DEVICE
double fast_sin(double theta) {
#if defined(__CUDA_ARCH__)
return ::sin(theta);
#else
return std::sin(theta);
#endif
}
CUTLASS_HOST_DEVICE
float fast_acos(float theta) {
#if defined(__CUDA_ARCH__)
return ::acosf(theta);
#else
return std::acos(theta);
#endif
}
CUTLASS_HOST_DEVICE
double fast_acos(double theta) {
#if defined(__CUDA_ARCH__)
return ::acos(theta);
#else
return std::acos(theta);
#endif
}
CUTLASS_HOST_DEVICE
float fast_asin(float theta) {
#if defined(__CUDA_ARCH__)
return ::asinf(theta);
#else
return std::asin(theta);
#endif
}
CUTLASS_HOST_DEVICE
double fast_asin(double theta) {
#if defined(__CUDA_ARCH__)
return ::asin(theta);
#else
return std::asin(theta);
#endif
}
CUTLASS_HOST_DEVICE
float fast_sqrt(float theta) {
#if defined(__CUDA_ARCH__)
return ::sqrtf(theta);
#else
return std::sqrt(theta);
#endif
}
CUTLASS_HOST_DEVICE
double fast_sqrt(double theta) {
#if defined(__CUDA_ARCH__)
return ::sqrt(theta);
#else
return std::sqrt(theta);
#endif
}
CUTLASS_HOST_DEVICE
float fast_exp(float x) {
#if defined(__CUDA_ARCH__)
return ::expf(x);
#else
return std::exp(x);
#endif
}
CUTLASS_HOST_DEVICE
double fast_exp(double x) {
#if defined(__CUDA_ARCH__)
return ::exp(x);
#else
return std::exp(x);
#endif
}
CUTLASS_HOST_DEVICE
half_t fast_exp(half_t x) {
#if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 10) && (__CUDA_ARCH__ >= 750)
return (half_t)(::hexp(x.to_half()));
#else
return (half_t)(fast_exp(float(x)));
#endif
}
CUTLASS_HOST_DEVICE
float fast_log(float x) {
#if defined(__CUDA_ARCH__)
return ::logf(x);
#else
return std::log(x);
#endif
}
CUTLASS_HOST_DEVICE
double fast_log(double x) {
#if defined(__CUDA_ARCH__)
return ::log(x);
#else
return std::log(x);
#endif
}
CUTLASS_HOST_DEVICE
float fast_tanh(float x) {
#if defined(__CUDA_ARCH__)
#if (__CUDACC_VER_MAJOR__ >= 11) && (__CUDA_ARCH__ >= 750)
float y;
asm volatile ( "tanh.approx.f32 %0, %1; " : "=f"(y) : "f"(x));
return y;
#else
return ::tanhf(x);
#endif
#else
return std::tanh(x);
#endif
}
CUTLASS_HOST_DEVICE
double fast_tanh(double x) {
#if defined(__CUDA_ARCH__)
return ::tanh(x);
#else
return std::tanh(x);
#endif
}
CUTLASS_HOST_DEVICE
half_t fast_tanh(half_t x) {
#if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 11) && (__CUDA_ARCH__ >= 750)
asm volatile ( "tanh.approx.f16 %0, %1;" : "=h"(x.raw()) : "h"(x.raw()));
return x;
#else
return half_t(fast_tanh(float(x)));
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct fast_exp_op {
CUTLASS_HOST_DEVICE
T operator()(T const &rhs) const {
return fast_exp(rhs);
}
};
#if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 10) && (__CUDA_ARCH__ >= 750)
template <int N>
struct fast_exp_op<Array<half_t, N>> {
CUTLASS_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
// use x2 specialization
__half2 const *in = reinterpret_cast<__half2 const *>(&rhs);
__half2 *out = reinterpret_cast<__half2 *>(&result);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
out[i] = ::h2exp(in[i]);
}
// residual
if (N % 2) {
half_t last = rhs[N - 1];
result[N - 1] = half_t(::hexp(last.to_half()));
}
return result;
}
};
#endif // #if defined(__CUDA_ARCH__)
template <typename T, int N>
struct fast_exp_op<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &rhs) const {
fast_exp_op<T> fast_op;
Array<T, N> y;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = fast_op(rhs[i]);
}
return y;
}
};
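//
// Usage sketch (illustrative): applying the functor element-wise to an Array.
//
//   Array<half_t, 8> x = ...;
//   fast_exp_op<Array<half_t, 8>> op;
//   Array<half_t, 8> y = op(x);   // y[i] == exp(x[i])
//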
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct fast_tanh_op {
CUTLASS_HOST_DEVICE
T operator()(T const &rhs) const {
return fast_tanh(rhs);
}
};
#if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 11) && (__CUDA_ARCH__ >= 750)
template <int N>
struct fast_tanh_op<Array<half_t, N>> {
CUTLASS_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
// use x2 specialization
uint32_t const *in = reinterpret_cast<uint32_t const *>(&rhs);
uint32_t *out = reinterpret_cast<uint32_t *>(&result);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
asm volatile ("tanh.approx.f16x2 %0, %1;" : "=r"(out[i]) : "r"(in[i]));
}
// residual
if (N % 2) {
uint16_t const *in = reinterpret_cast<uint16_t const *>(&rhs);
uint16_t *out = reinterpret_cast<uint16_t *>(&result);
asm volatile ("tanh.approx.f16 %0, %1;" : "=h"(out[N - 1]) : "h"(in[N - 1]));
}
return result;
}
};
#endif // #if defined(__CUDA_ARCH__)
template <typename T, int N>
struct fast_tanh_op<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &rhs) const {
fast_tanh_op<T> fast_op;
Array<T, N> y;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = fast_op(rhs[i]);
}
return y;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Absolute value function
template <typename T>
CUTLASS_HOST_DEVICE
T absolute_value(T x) {
if (x < T()) {
return -x;
}
return x;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// [File: include/cutlass/fast_math.h]
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/trace.h"
#include "cute/arch/cluster_sm90.hpp"
#include "cute/arch/copy_sm90.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/tensor_predicate.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Stages,
class ClusterShape,
int PipelineAsyncMmaStages,
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm90TmaGmma<Stages, ClusterShape, PipelineAsyncMmaStages>,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm90TmaGmma<Stages, ClusterShape, PipelineAsyncMmaStages>;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{}));
using MainloopPipeline = cutlass::PipelineTmaAsync<DispatchPolicy::Stages>;
using PipelineParams = typename MainloopPipeline::Params;
using PipelineState = typename cutlass::PipelineState<DispatchPolicy::Stages>;
static constexpr int ThreadCount = CUTE_STATIC_V(size(TiledMma{}));
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
// Tile along modes in a way that maximizes the TMA box size.
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,StrideA>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,StrideB>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more.");
static_assert(cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value &&
cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value,
"MMA atom must source both A and B operand from smem_desc for this mainloop.");
static_assert(cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>,
"GmemTiledCopy - invalid SM90 TMA copy atom specified.");
static_assert(cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>,
"GmemTiledCopy - invalid SM90 TMA copy atom specified.");
// TMA converts f32 input to tf32 when copying from GMEM to SMEM
// For all other types, cast to size equivalent uint type to avoid any rounding by TMA.
static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>;
static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>;
using InternalElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>;
using InternalElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>;
struct SharedStorage {
cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>> smem_A;
cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>> smem_B;
using PipelineStorage = typename MainloopPipeline::SharedStorage;
alignas(16) PipelineStorage pipeline_storage;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
uint32_t mma_promotion_interval = 4;
};
// Device side kernel params
struct Params {
// Assumption: StrideA is congruent with Problem_MK
using TMA_A = decltype(make_tma_copy(
GmemTiledCopyA{},
make_tensor(static_cast<InternalElementA const*>(nullptr), repeat_like(StrideA{}, int32_t(0)), StrideA{}),
SmemLayoutA{}(_,_,0),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{}))); // mcast along N mode for this M load, if any
// Assumption: StrideB is congruent with Problem_NK
using TMA_B = decltype(make_tma_copy(
GmemTiledCopyB{},
make_tensor(static_cast<InternalElementB const*>(nullptr), repeat_like(StrideB{}, int32_t(0)), StrideB{}),
SmemLayoutB{}(_,_,0),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{}))); // mcast along M mode for this N load, if any
TMA_A tma_load_a;
TMA_B tma_load_b;
};
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
(void) workspace;
// Optionally append 1s until problem shape is rank-4 (MNKL), in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
auto ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_A);
auto ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_B);
Tensor tensor_a = make_tensor(ptr_A, make_layout(make_shape(M,K,L), args.dA));
Tensor tensor_b = make_tensor(ptr_B, make_layout(make_shape(N,K,L), args.dB));
typename Params::TMA_A tma_load_a = make_tma_copy(
GmemTiledCopyA{},
tensor_a,
SmemLayoutA{}(_,_,cute::Int<0>{}),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{})); // mcast along N mode for this M load, if any
typename Params::TMA_B tma_load_b = make_tma_copy(
GmemTiledCopyB{},
tensor_b,
SmemLayoutB{}(_,_,cute::Int<0>{}),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{})); // mcast along M mode for this N load, if any
return {
tma_load_a,
tma_load_b
};
}
template<class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
constexpr int tma_alignment_bits = 128;
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
constexpr int min_tma_aligned_elements_A = tma_alignment_bits / cutlass::sizeof_bits<ElementA>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_A>(cute::make_shape(M,K,L), StrideA{});
constexpr int min_tma_aligned_elements_B = tma_alignment_bits / cutlass::sizeof_bits<ElementB>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_B>(cute::make_shape(N,K,L), StrideB{});
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
}
return implementable;
}
/// Issue Tma Descriptor Prefetch -- ideally from a single thread for best performance
CUTLASS_DEVICE
static void prefetch_tma_descriptors(Params const& mainloop_params) {
cute::prefetch_tma_descriptor(mainloop_params.tma_load_a.get_tma_descriptor());
cute::prefetch_tma_descriptor(mainloop_params.tma_load_b.get_tma_descriptor());
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Producer Perspective
template <
class TensorA, class TMA_LOAD_A,
class TensorB, class TMA_LOAD_B,
class FrgTensorC,
class KTileIterator
>
CUTLASS_DEVICE void
operator() (
TensorA const& gA, TMA_LOAD_A& tma_load_a,
TensorB const& gB, TMA_LOAD_B& tma_load_b,
FrgTensorC& accum,
KTileIterator k_tile_iter, int k_tile_count,
int thread_idx,
uint32_t block_rank_in_cluster,
char* shared_memory,
Params const& mainloop_params)
{
using namespace cute;
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2.");
static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::is_void_v<SmemCopyAtomA>,
"SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
static_assert(cute::is_void_v<SmemCopyAtomB>,
"SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(shared_memory);
Tensor sA = make_tensor(make_smem_ptr(storage.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(storage.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
//
// Prepare the TMA loads for A and B
//
constexpr uint32_t cluster_shape_x = get<0>(ClusterShape());
uint2 cluster_local_block_id = {block_rank_in_cluster % cluster_shape_x, block_rank_in_cluster / cluster_shape_x};
auto block_tma_a = tma_load_a.get_slice(cluster_local_block_id.y);
auto block_tma_b = tma_load_b.get_slice(cluster_local_block_id.x);
// Applies the mapping from block_tma_a
Tensor tAgA = block_tma_a.partition_S(gA); // (TMA,TMA_M,TMA_K,k)
Tensor tAsA = block_tma_a.partition_D(sA); // (TMA,TMA_M,TMA_K,PIPE)
Tensor tBgB = block_tma_b.partition_S(gB); // (TMA,TMA_N,TMA_K,k)
Tensor tBsB = block_tma_b.partition_D(sB); // (TMA,TMA_N,TMA_K,PIPE)
//
// Prepare TMA membars and PREFETCH
//
// Number of pipelined k-tiles in smem
constexpr int K_PIPE_MAX = DispatchPolicy::Stages;
// NOTE: Another parameter: Partition the pipeline between active MMAs and active TMAs
// Tunable via the dispatch policy to tolerate latencies evenly across the math (MMA) and copy (TMA) stages
// K_PIPE_MMAS: The max number of active MMA pipes at beginning of every loop
// K_PIPE_TMAS: The max number of active TMA pipes at beginning of every loop (geq 1)
constexpr int K_PIPE_MMAS = DispatchPolicy::PipelineAsyncMmaStages;
constexpr int K_PIPE_TMAS = K_PIPE_MAX - K_PIPE_MMAS;
static_assert(0 <= K_PIPE_MMAS && K_PIPE_MMAS < K_PIPE_MAX);
static_assert(0 < K_PIPE_TMAS && K_PIPE_TMAS <= K_PIPE_MAX);
static_assert(K_PIPE_MMAS < K_PIPE_MAX - 1);
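// Illustrative example (a sketch, not a prescribed configuration): with Stages = 7 and
// PipelineAsyncMmaStages = 2, up to K_PIPE_MMAS = 2 GMMAs remain in flight at the top of
// each iteration while up to K_PIPE_TMAS = 5 TMA loads may be outstanding.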
// Set the bytes transferred in this TMA transaction (may involve multiple issues)
constexpr uint32_t TmaTransactionBytes = static_cast<uint32_t>(
cutlass::bits_to_bytes(size<0>(sA) * size<1>(sA) * sizeof_bits<InternalElementA>::value) +
cutlass::bits_to_bytes(size<0>(sB) * size<1>(sB) * sizeof_bits<InternalElementB>::value));
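// Illustrative example (sketch): for a 128x64x64 (M,N,K) tile with 16-bit A and B operands,
// each stage transfers 128*64*2 + 64*64*2 = 24576 bytes in one TMA transaction.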
// Obtain warp index
int warp_idx = canonical_warp_idx_sync();
int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup;
PipelineParams params;
params.transaction_bytes = TmaTransactionBytes;
params.role = MainloopPipeline::ThreadCategory::ProducerConsumer;
params.is_leader = warp_group_thread_idx == 0;
params.num_consumers = NumThreadsPerWarpGroup;
MainloopPipeline pipeline(storage.pipeline_storage, params, ClusterShape{});
// State variables used for iterating the circular buffer
// smem_pipe_read / release is used by the consumer of SMEM data - i.e MMA
// smem_pipe_write is used by the producer of SMEM data - i.e TMA
PipelineState smem_pipe_read;
PipelineState smem_pipe_release;
PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>();
// We need this to guarantee that the Pipeline init is visible
// To all producers and consumer blocks in the Cluster
if constexpr (size(ClusterShape{}) > 1) {
cute::cluster_arrive_relaxed();
cute::cluster_wait();
}
else {
__syncthreads();
}
// Set predicate for the lowest lane_id in the warp
int lane_predicate = cute::elect_one_sync();
uint16_t mcast_mask_a = 0;
uint16_t mcast_mask_b = 0;
// Keep a copy to know when to stop issuing loads
int k_tile_count_tma = k_tile_count;
// Issue TmaLoads (Prologue fetches)
if (warp_idx == 0 && lane_predicate == 1) {
// Maps the tile -> block, value
if constexpr (cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int n = 0; n < size<1>(block_layout); ++n) {
mcast_mask_a |= (uint16_t(1) << block_layout(cluster_local_block_id.x,n,Int<0>{}));
}
}
if constexpr (cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int m = 0; m < size<0>(block_layout); ++m) {
mcast_mask_b |= (uint16_t(1) << block_layout(m,cluster_local_block_id.y,Int<0>{}));
}
}
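// Note (illustrative): with a multicast copy, every CTA sharing this CTA's M coordinate in
// the cluster receives the same A tile, so mcast_mask_a sets one bit per such CTA;
// mcast_mask_b analogously selects the CTAs sharing this CTA's N coordinate.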
// Issue the prologue loads
int prologue_tma_count = min(K_PIPE_MAX, k_tile_count);
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < prologue_tma_count; ++stage) {
pipeline.producer_acquire(smem_pipe_write);
using BarrierType = typename MainloopPipeline::ProducerBarrierType;
BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_write);
copy(tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,stage));
copy(tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,stage));
++k_tile_iter;
++smem_pipe_write;
}
k_tile_count_tma -= prologue_tma_count;
}
//
// Define C accumulators and A/B partitioning
//
TiledMma tiled_mma;
auto thread_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCsA = thread_mma.partition_A(sA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCsB = thread_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE)
// Allocate "fragments/descriptors"
Tensor tCrA = thread_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE)
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(accum)); // M
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K
CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE
CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tAsA)); // PIPE
CUTE_STATIC_ASSERT_V(size<3>(tCsB) == size<3>(tBsB)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
__syncthreads();
tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;
warpgroup_fence_operand(accum);
// Prologue MMAs
CUTLASS_PRAGMA_UNROLL
for (int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count);
prologue_mma_count > 0; --prologue_mma_count)
{
// WAIT on smem_pipe_read until its data is available
pipeline.consumer_wait(smem_pipe_read);
warpgroup_arrive();
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) {
// (V,M,K) x (V,N,K) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block,smem_pipe_read.index()), tCrB(_,_,k_block,smem_pipe_read.index()), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
}
warpgroup_commit_batch();
++smem_pipe_read;
--k_tile_count;
}
warpgroup_fence_operand(accum);
//
// PIPELINED MAIN LOOP
//
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 0; --k_tile_count)
{
// WAIT on smem_pipe_read until data is available
pipeline.consumer_wait(smem_pipe_read);
//
// Compute on k_tile
//
warpgroup_fence_operand(accum);
warpgroup_arrive();
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) {
// (V,M,K) x (V,N,K) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block,smem_pipe_read.index()), tCrB(_,_,k_block,smem_pipe_read.index()), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
}
warpgroup_commit_batch();
/// Wait on the GMMA barrier for K_PIPE_MMAS (or fewer) outstanding to ensure smem_pipe_write is consumed
warpgroup_wait<K_PIPE_MMAS>();
warpgroup_fence_operand(accum);
pipeline.consumer_release(smem_pipe_release); // UNLOCK wr stage, done _computing_ on it
//
// Copy gmem to smem for *k_tile_iter
//
// Do Acquire & Load only if needed - helps with both performance and also corner case illegal barrier-ops
if (warp_idx == 0 && lane_predicate == 1 && (k_tile_count_tma > 0) ) {
pipeline.producer_acquire(smem_pipe_write); // LOCK wr stage, for _writing_
using BarrierType = typename MainloopPipeline::ProducerBarrierType;
BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_write);
copy(tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,smem_pipe_write.index()));
copy(tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,smem_pipe_write.index()));
++smem_pipe_write;
++k_tile_iter;
--k_tile_count_tma;
}
// Advance consumer pipeline
++smem_pipe_read;
++smem_pipe_release;
}
// Wait on all GMMAs
warpgroup_wait<0>();
warpgroup_fence_operand(accum);
// Workaround for ensuring Smem destruction doesn't happen accidentally
if constexpr (size(typename DispatchPolicy::ClusterShape{}) > 1) {
cute::cluster_arrive();
cute::cluster_wait();
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
// [File: include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss.hpp]
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/arch/mma.h"
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/gemm_universal.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/device/gemm_universal_base.h"
#include "cutlass/layout/permute.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*!
GemmUniversal is a stateful, reusable GEMM handle. Once initialized for a given GEMM computation
(problem geometry and data references), it can be reused across different GEMM problems having the
same geometry. (Once initialized, details regarding problem geometry and references to workspace memory
cannot be updated.)
The universal GEMM accommodates serial reductions, parallel reductions, batched strided, and
batched array variants.
*/
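/*
  Usage sketch (illustrative only; the element types, layouts, and exact Arguments
  signature below are assumptions chosen for this example, and the pointer/stride
  names are placeholders):

    using Gemm = cutlass::gemm::device::GemmUniversal<
        cutlass::half_t, cutlass::layout::RowMajor,      // A
        cutlass::half_t, cutlass::layout::ColumnMajor,   // B
        float, cutlass::layout::RowMajor>;               // C and D

    typename Gemm::Arguments args(
        cutlass::gemm::GemmUniversalMode::kGemm,
        {M, N, K},                       // problem size
        1,                               // batch count (or split-K slices)
        {alpha, beta},                   // epilogue parameters
        ptr_A, ptr_B, ptr_C, ptr_D,
        batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D,
        lda, ldb, ldc, ldd);

    Gemm gemm_op;
    if (Gemm::can_implement(args) == cutlass::Status::kSuccess) {
      gemm_op.initialize(args, workspace_ptr);
      gemm_op();                         // launch on the default stream
    }
*/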
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_ = arch::Sm70,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
/// Gather operand A by using an index array
bool GatherA = false,
/// Gather operand B by using an index array
bool GatherB = false,
/// Scatter result D by using an index array
bool ScatterD = false,
/// Permute result D
typename PermuteDLayout_ = layout::NoPermute,
/// Permute operand A
typename PermuteALayout_ = layout::NoPermute,
/// Permute operand B
typename PermuteBLayout_ = layout::NoPermute
>
class GemmUniversal :
public GemmUniversalBase<
typename kernel::DefaultGemmUniversal<
ElementA_,
LayoutA_,
TransformA,
AlignmentA,
ElementB_,
LayoutB_,
TransformB,
AlignmentB,
ElementC_,
LayoutC_,
ElementAccumulator_,
OperatorClass_,
ArchTag_,
ThreadblockShape_,
WarpShape_,
InstructionShape_,
EpilogueOutputOp_,
ThreadblockSwizzle_,
Stages,
Operator_,
SharedMemoryClearOption::kNone,
GatherA,
GatherB,
ScatterD,
PermuteDLayout_,
PermuteALayout_,
PermuteBLayout_
>::GemmKernel
> {
public:
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
using PermuteDLayout = PermuteDLayout_;
using PermuteALayout = PermuteALayout_;
using PermuteBLayout = PermuteBLayout_;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Base = GemmUniversalBase<
typename kernel::DefaultGemmUniversal<
ElementA_,
LayoutA_,
TransformA,
AlignmentA,
ElementB_,
LayoutB_,
TransformB,
AlignmentB,
ElementC_,
LayoutC_,
ElementAccumulator_,
OperatorClass_,
ArchTag_,
ThreadblockShape_,
WarpShape_,
InstructionShape_,
EpilogueOutputOp_,
ThreadblockSwizzle_,
Stages,
Operator_,
SharedMemoryClearOption::kNone,
GatherA,
GatherB,
ScatterD,
PermuteDLayout_,
PermuteALayout_,
PermuteBLayout_
>::GemmKernel
>;
using Arguments = typename Base::Arguments;
using GemmKernel = typename Base::GemmKernel;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major output exchanges problem size and operand.
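///
/// This works because a column-major C computed as C = A * B occupies the same memory as a
/// row-major C^T computed as C^T = B^T * A^T; the specialization therefore swaps and
/// transposes the A and B operands and reuses the row-major kernel.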
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// Operation performed by GEMM
typename Operator_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Scatter result D by using an index array
bool ScatterD,
/// Permute result D
typename PermuteDLayout_,
/// Permute operand A
typename PermuteALayout_,
/// Permute operand B
typename PermuteBLayout_
>
class GemmUniversal<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB,
Operator_, TransformA, TransformB, GatherA, GatherB, ScatterD,
PermuteDLayout_, PermuteALayout_, PermuteBLayout_> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
using PermuteDLayout = PermuteDLayout_;
using PermuteALayout = PermuteALayout_;
using PermuteBLayout = PermuteBLayout_;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using UnderlyingOperator = typename GemmUniversal<
ElementB,
typename layout::LayoutTranspose<LayoutB>::type,
ElementA,
typename layout::LayoutTranspose<LayoutA>::type,
ElementC,
layout::RowMajor,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
kAlignmentB,
kAlignmentA,
Operator,
kTransformB,
kTransformA,
GatherB,
GatherA,
ScatterD,
PermuteDLayout,
PermuteBLayout,
PermuteALayout
>::Base;
using GemmKernel = typename UnderlyingOperator::GemmKernel;
static int const kAlignmentC = EpilogueOutputOp::kCount;
/// Argument structure
using Arguments = typename UnderlyingOperator::Arguments;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the GEMM.
GemmUniversal() { }
/// Helper to construct a transposed equivalent for the underlying GEMM operator
static Arguments to_underlying_arguments(Arguments const &args) {
return args.transposed_problem();
}
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
return UnderlyingOperator::maximum_active_blocks(smem_capacity);
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
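/*
  Why the exchange works (a short derivation, not additional functionality): a column-major D with
  extents (M, N) is bit-identical to a row-major D^T with extents (N, M). Since
      D   = alpha * A * B + beta * C
  implies
      D^T = alpha * B^T * A^T + beta * C^T,
  the specialization above forwards to a row-major GemmUniversal with problem size (N, M, K) and
  with the A/B operands exchanged: their layouts are transposed via layout::LayoutTranspose and the
  alignments, complex transforms, gather indices, and permute layouts are swapped accordingly,
  which is exactly what the UnderlyingOperator alias and to_underlying_arguments() encode.
*/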
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
// (end of file: include/cutlass/gemm/device/gemm_universal.h)
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This file contains definitions and utility functions for describing problem shapes
for 3.x Ptr-Array GEMMs and Grouped GEMMs.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_coord.h"
#include "cute/container/array.hpp"
#if ! defined(__CUDACC_RTC__)
#include <initializer_list>
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm {
////////////////////////////////////////////////////////////////////////////////////////////////////
template <class ProblemShape_>
struct GroupProblemShape {
using UnderlyingProblemShape = ProblemShape_;
int32_t num_groups = 1;
UnderlyingProblemShape* problem_shapes = nullptr;
UnderlyingProblemShape const* host_problem_shapes = nullptr;
CUTLASS_HOST_DEVICE
int32_t groups() const { return num_groups; }
CUTLASS_HOST_DEVICE
UnderlyingProblemShape const
get_problem_shape(int32_t group_idx) const {
return problem_shapes[group_idx];
}
CUTLASS_HOST_DEVICE
UnderlyingProblemShape const
get_host_problem_shape(int32_t group_idx) const {
return host_problem_shapes[group_idx];
}
CUTLASS_HOST_DEVICE
bool
is_host_problem_shape_available() {
return host_problem_shapes != nullptr;
}
};
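/*
  Usage sketch (illustrative; num_groups, problem_sizes_device, and problem_sizes_host are
  placeholder names for data the caller owns):

    using ProblemShape = cutlass::gemm::GroupProblemShape<cute::Shape<int, int, int>>;

    // problem_sizes_device points to num_groups MNK tuples in device memory;
    // problem_sizes_host may be nullptr if no host copy is available.
    ProblemShape problem_shape{num_groups, problem_sizes_device, problem_sizes_host};

    // Host-side code should check availability before touching the host copy:
    if (problem_shape.is_host_problem_shape_available()) {
      auto mnk0 = problem_shape.get_host_problem_shape(0);
    }
*/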
template <class ProblemShape_>
class ArrayProblemShape {
public:
using UnderlyingProblemShape = ProblemShape_;
ArrayProblemShape() = default;
ArrayProblemShape(UnderlyingProblemShape ps) : problem_shape_(ps) {}
  // The number of groups for Ptr-Array GEMM always remains one; only the number of batches (l) can vary

// This is just to maintain uniformity with GroupProblemShape
constexpr int32_t groups() const { return 1; }
UnderlyingProblemShape* problem_shapes() const {
return &problem_shape_;
}
UnderlyingProblemShape const* host_problem_shapes() const {
return &problem_shape_;
}
// This is just to maintain uniformity with GroupProblemShape
CUTLASS_HOST_DEVICE
UnderlyingProblemShape const
get_problem_shape(int32_t /* unused */ = 0) const {
return problem_shape_;
}
CUTLASS_HOST_DEVICE
UnderlyingProblemShape const
get_host_problem_shape(int32_t /* unused */ = 0) const {
return problem_shape_;
}
CUTLASS_HOST_DEVICE
bool
is_host_problem_shape_available() {
return true;
}
private:
UnderlyingProblemShape problem_shape_{};
};
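/*
  Usage sketch (illustrative; the 4-mode shape and variable names are assumptions for the example):
  for Ptr-Array GEMM a single logical problem shape is shared by all batches, with the batch count
  carried in the L mode of the shape itself.

    using ProblemShape = cutlass::gemm::ArrayProblemShape<cute::Shape<int, int, int, int>>;
    cute::Shape<int, int, int, int> shape_MNKL{M, N, K, batch_count};
    ProblemShape problem_shape(shape_MNKL);   // groups() == 1
*/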
} // namespace cutlass::gemm
// (end of file: include/cutlass/gemm/group_array_problem_shape.hpp)
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/trmm_universal.h"
#include "cutlass/gemm/kernel/default_trmm.h"
#include "cutlass/gemm/kernel/default_trmm_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Side Mode for the kernel
SideMode kSideMode,
/// Fill Mode for the triangular matrix
FillMode kFillMode,
/// Diag Type for the triangular matrix
DiagType kDiagType,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator,
///
typename Enable = void
>
struct DefaultTrmmUniversal;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued TRMM kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Side Mode for the kernel
SideMode kSideMode,
/// Fill Mode for the triangular matrix
FillMode kFillMode,
/// Diag Type for the triangular matrix
DiagType kDiagType,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator>
struct DefaultTrmmUniversal<
ElementA,
LayoutA,
ComplexTransform::kNone, // transform A
kAlignmentA,
ElementB,
LayoutB,
ComplexTransform::kNone, // transform B
kAlignmentB,
kSideMode,
kFillMode,
kDiagType,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultTrmmKernel = typename kernel::DefaultTrmm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
kSideMode,
kFillMode,
kDiagType,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator
>::TrmmKernel;
/// Define the kernel in terms of the default kernel
using TrmmKernel = kernel::TrmmUniversal<
typename DefaultTrmmKernel::Mma,
typename DefaultTrmmKernel::Epilogue,
ThreadblockSwizzle,
kSideMode,
kFillMode,
kDiagType
>;
};
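// Dispatch illustration (informational): the enable_if guard above selects this real-valued
// specialization whenever the accumulator type is not complex, while the specialization below
// handles complex accumulation. For example, with otherwise identical template arguments:
//   ElementAccumulator = float                   -> cutlass::is_complex<float>::value == false -> real path
//   ElementAccumulator = cutlass::complex<float> -> cutlass::is_complex<...>::value == true    -> complex path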
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued TRMM kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Side Mode for the kernel
SideMode kSideMode,
/// Fill Mode for the triangular matrix
FillMode kFillMode,
/// Diag Type for the triangular matrix
DiagType kDiagType,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator
>
struct DefaultTrmmUniversal<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
kSideMode,
kFillMode,
kDiagType,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultTrmmKernel = typename kernel::DefaultTrmmComplex<
ElementA,
LayoutA,
ElementB,
LayoutB,
kSideMode,
kFillMode,
kDiagType,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
TransformA,
TransformB,
Operator,
SplitKSerial
>::TrmmKernel;
/// Define the kernel in terms of the default kernel
using TrmmKernel = kernel::TrmmUniversal<
typename DefaultTrmmKernel::Mma,
typename DefaultTrmmKernel::Epilogue,
ThreadblockSwizzle,
kSideMode,
kFillMode,
kDiagType
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// (end of file: include/cutlass/gemm/kernel/default_trmm_universal.h)
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/arch.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
class GemmUniversal<
Mma_,
Epilogue_,
ThreadblockSwizzle_,
void,
// 3.x kernels use the first template argument to define the ProblemShape
// We use this invariant to SFINAE dispatch against either the 2.x API or the 3.x API
cute::enable_if_t<not (cute::is_tuple<Mma_>::value || IsCutlass3ArrayKernel<Mma_>::value)>
> {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
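  // Worked example (illustrative element types): with ElementA = half_t (16 bits) the first term is
  // 128 / 16 = 8 elements, and with ElementB = int8_t (8 bits) the second term is 128 / 8 = 16
  // elements, so kSplitKAlignment = 16. Splitting K on multiples of 16 elements keeps every split
  // boundary at least 128b aligned for both operands.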
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
typename LayoutA::Stride stride_a;
typename LayoutB::Stride stride_b;
typename LayoutC::Stride stride_c;
typename LayoutC::Stride stride_d;
typename LayoutA::Stride::LongIndex lda;
typename LayoutB::Stride::LongIndex ldb;
typename LayoutC::Stride::LongIndex ldc;
typename LayoutC::Stride::LongIndex ldd;
int const * ptr_gather_A_indices;
int const * ptr_gather_B_indices;
int const * ptr_scatter_D_indices;
//
// Methods
//
Arguments():
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr),
ptr_gather_A_indices(nullptr),
ptr_gather_B_indices(nullptr),
ptr_scatter_D_indices(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride stride_a,
typename LayoutB::Stride stride_b,
typename LayoutC::Stride stride_c,
typename LayoutC::Stride stride_d,
int const *ptr_gather_A_indices = nullptr,
int const *ptr_gather_B_indices = nullptr,
int const *ptr_scatter_D_indices = nullptr)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d),
ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
ptr_scatter_D_indices(ptr_scatter_D_indices)
{
lda = 0;
ldb = 0;
ldc = 0;
ldd = 0;
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
int const *ptr_gather_A_indices = nullptr,
int const *ptr_gather_B_indices = nullptr,
int const *ptr_scatter_D_indices = nullptr
):
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd),
ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
ptr_scatter_D_indices(ptr_scatter_D_indices)
{
stride_a = make_Coord(lda);
stride_b = make_Coord(ldb);
stride_c = make_Coord(ldc);
stride_d = make_Coord(ldd);
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const
{
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.stride_a, args.stride_b);
std::swap(args.batch_stride_A, args.batch_stride_B);
std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int * ptr_gather_A_indices;
int * ptr_gather_B_indices;
int * ptr_scatter_D_indices;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)),
ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices)),
ptr_scatter_D_indices(const_cast<int *>(args.ptr_scatter_D_indices))
{}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
// Update input/output pointers
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
this->batch_stride_D = args.batch_stride_D;
ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices);
ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices);
ptr_scatter_D_indices = const_cast<int *>(args.ptr_scatter_D_indices);
output_op = args.epilogue;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size)
{
CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
static int const kAlignmentA = (cute::is_same<LayoutA,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (cute::is_same<LayoutA,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (cute::is_same<LayoutB,
layout::RowMajorInterleaved<32>>::value)
? 32
: (cute::is_same<LayoutB,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (cute::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (cute::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (cute::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (cute::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (cute::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| cute::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (cute::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (cute::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (cute::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| cute::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (cute::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (cute::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (cute::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| cute::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
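  // Worked example (illustrative): with LayoutA = layout::RowMajor and
  // Mma::IteratorA::AccessType::kElements == 8 (128b accesses of half_t), the check above requires
  // problem_size.k() % 8 == 0. A problem with K == 129 therefore returns
  // Status::kErrorMisalignedOperand, while K == 128 passes the A-operand check.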
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmUniversal op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
ThreadblockSwizzle threadblock_swizzle;
run_with_swizzle(params, shared_storage, threadblock_swizzle);
}
/// Executes one GEMM with an externally-provided swizzling function
CUTLASS_DEVICE
void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) {
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
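    // Worked example of the K-slab computation above (illustrative numbers): with K == 700,
    // gemm_k_size == 256, and grid_tiled_shape.k() == 3, the three split-K CTAs cover
    //   k-index 0: [offset_k, problem_size_k) = [0, 256)
    //   k-index 1: [256, 512)
    //   k-index 2: [512, 700)   (the last split keeps the full problem K as its upper bound)
    // and the last CTA later runs (700 - 512 + kK - 1) / kK mainloop iterations, i.e. 6 for kK == 32.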
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A,
params.ptr_gather_A_indices);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B,
params.ptr_gather_B_indices);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
    // Compute the number of mainloop iterations for this threadblock's K range
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.ptr_scatter_D_indices
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.ptr_scatter_D_indices
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
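/*
  Serial split-K protocol implemented above, as a worked sequence (informational summary of the
  code, using grid_tiled_shape.k() == 3 as an example):
    - The CTA with k-index 0 waits for the semaphore to read 0 (its initial value), applies the
      partition-0 alpha/beta, writes its partial result to D, and releases the lock with value 1.
    - The CTA with k-index 1 redirects its source iterator from C to D, waits for value 1,
      accumulates its partial sum on top of the previous partial, and releases value 2.
    - The CTA with k-index 2 (the final partition) waits for value 2, produces the final D tile,
      and releases value 0, resetting the semaphore for later launches that reuse the workspace.
*/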
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// (end of file: include/cutlass/gemm/kernel/gemm_universal.h)
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T)
typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T)
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper)
BlasMode BlasMode_ ///! Blas3 computation mode
>
struct Rank2KUniversal {
public:
using Mma1 = Mma1_;
using Mma2 = Mma2_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma1::IteratorA::Element;
using ElementB = typename Mma1::IteratorB::Element;
// Mma1 (A x B^T)
using LayoutA = typename Mma1::IteratorA::Layout;
using LayoutBT = typename Mma1::IteratorB::Layout;
static ComplexTransform const kMma1TransformA = Mma1::kTransformA;
static ComplexTransform const kMma1TransformB = Mma1::kTransformB;
// Mma2 (B x A^T)
using LayoutB = typename Mma2::IteratorA::Layout;
using LayoutAT = typename Mma2::IteratorB::Layout;
static ComplexTransform const kMma2TransformA = Mma2::kTransformA;
static ComplexTransform const kMma2TransformB = Mma2::kTransformB;
// Common type definitions for Mma1 and Mma2
using Operator = typename Mma1::Operator;
using OperatorClass = typename Mma1::Operator::OperatorClass;
using ThreadblockShape = typename Mma1::Shape;
using WarpShape = typename Mma1::Operator::Shape;
using InstructionShape = typename Mma1::Policy::Operator::InstructionShape;
using ArchTag = typename Mma1::ArchTag;
static int const kStages = Mma1::kStages;
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
// Output related typedefinitions
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static FillMode const kFillModeC = FillModeC_;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static BlasMode const kBlasMode = BlasMode_;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma1::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
GemmCoord problem_size {};
int batch_count{1};
typename EpilogueOutputOp::Params epilogue{};
void const * ptr_A = nullptr;
void const * ptr_B = nullptr;
void const * ptr_C = nullptr;
void * ptr_D = nullptr;
int64_t batch_stride_A {0};
int64_t batch_stride_B {0};
int64_t batch_stride_C {0};
int64_t batch_stride_D {0};
typename LayoutA::Stride::Index lda{0};
typename LayoutB::Stride::Index ldb{0};
typename LayoutC::Stride::Index ldc{0};
typename LayoutC::Stride::Index ldd{0};
bool allow_early_exit{false};
//
// Methods
//
Arguments() = default;
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd,
bool allow_early_exit = false
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
      batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd),
allow_early_exit(allow_early_exit) {
}
    /// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
// Mma1 Iterator A and B params
typename Mma1::IteratorA::Params params_A{};
typename Mma1::IteratorB::Params params_BT{};
// Mma2 Iterator A and B params
typename Mma2::IteratorA::Params params_B{};
typename Mma2::IteratorB::Params params_AT{};
typename Epilogue::OutputTileIterator::Params params_C{};
typename Epilogue::OutputTileIterator::Params params_D{};
typename EpilogueOutputOp::Params output_op{};
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
int batch_count{0};
int gemm_k_size{0};
void * ptr_A = nullptr;
void * ptr_B = nullptr;
void * ptr_C = nullptr;
void * ptr_D = nullptr;
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
int *semaphore = nullptr;
bool allow_early_exit {false};
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(args.lda),
params_BT(args.ldb),
params_B(args.ldb),
params_AT(args.lda),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(const_cast<void *>(args.ptr_D)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)),
allow_early_exit(args.allow_early_exit) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma1::SharedStorage mma1_main_loop;
typename Mma2::SharedStorage mma2_main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
Rank2KUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Early exit following LAPACK's definition
if (params.allow_early_exit &&
(params.output_op.alpha == ElementC(0)) && (params.output_op.beta == ElementC(1))) {
return;
}
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Early exit if Fill Mode is Lower and
// if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal)
if (kFillModeC == cutlass::FillMode::kLower &&
(threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) {
return;
}
// Early exit if Fill Mode is Upper and
// if the entire tile is below the main diagonal (top-right corner is at or below the diagonal)
if (kFillModeC == cutlass::FillMode::kUpper &&
threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
return;
}
bool tile_on_diagonal = false;
// Mark tiles that are being crossed by the main diagonal
// (top-right and bottom-left corners are on either side of the diagonal)
if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN
&& threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
tile_on_diagonal = true;
}
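    // Worked example (illustrative, ThreadblockShape 128x128): for FillMode::kLower the tile at
    // (m, n) = (1, 3) satisfies (1+1)*128 <= 3*128 and exits early (it lies entirely above the
    // diagonal); the tile at (3, 3) satisfies both inequalities above and is marked
    // tile_on_diagonal, so its epilogue masks elements on the wrong side of the diagonal; the tile
    // at (5, 2) lies entirely below the diagonal and is written in full.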
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_MxK{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands for Mma1
typename Mma1::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma1::IteratorB iterator_BT(
params.params_BT,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN);
// Construct iterators to A and B operands for Mma2
typename Mma2::IteratorA iterator_B(
params.params_B,
ptr_B,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma2::IteratorB iterator_AT(
params.params_AT,
ptr_A,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply for Mma1 (A x BT)
Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx);
// Construct thread-scoped matrix multiply for Mma2 (B x AT)
Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx);
typename Mma1::FragmentC accumulators;
accumulators.clear();
    // Compute the number of mainloop iterations for this threadblock's K range
int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
// Compute threadblock-scoped matrix multiply-add (A x BT)
mma1(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_BT,
accumulators);
    // The HER2K kernel needs alpha to be complex, and conj(alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// If CTA not on diagonal, FillMode doesn't apply.
FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
__syncthreads();
accumulators.clear();
}
// Compute threadblock-scoped matrix multiply-add (B x AT)
mma2(
gemm_k_iterations,
accumulators,
iterator_B,
iterator_AT,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
/* Needed for HER2K where the second HERK is multiplied by conj(alpha) */
typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1);
EpilogueOutputOp output_op_her2k(second_her2k_params);
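    // Worked expansion for BlasMode::kHermitian (informational): HER2K computes
    //   C = alpha * (A * B^H) + conj(alpha) * (B * A^H) + beta * C.
    // In that mode the first epilogue above already applied {alpha, beta} to the A * B^H partial
    // product and wrote it to D, so output_op_her2k applies {conj(alpha), 1} to the B * A^H
    // partial product and accumulates onto that intermediate result without rescaling it. For
    // BlasMode::kSymmetric both rank-k updates share the accumulators and the ordinary output_op
    // is used instead.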
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
// The HER2K kernel needs alpha to be complex, and conj(alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
ptr_C = static_cast<ElementC *>(params.ptr_D);
}
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
if (kBlasMode == BlasMode::kSymmetric) {
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
} else {
output_op_her2k.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// If CTA not on diagonal, FillMode doesn't apply.
FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
if (kBlasMode == BlasMode::kSymmetric) {
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
} else {
epilogue(
output_op_her2k,
iterator_D,
accumulators,
iterator_C);
}
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/rank_2k_universal.h/0 | {
"file_path": "include/cutlass/gemm/kernel/rank_2k_universal.h",
"repo_id": "include",
"token_count": 9747
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Sparse GEMM with visitor.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/sparse_gemm.h"
#include "cutlass/gemm/kernel/params_sparse_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Sparse GEMM that computes the epilogue with a visitor functor
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct SparseGemmWithEpilogueVisitor : public SparseGemm<Mma_, Epilogue_, ThreadblockSwizzle_, false> {
using Base = SparseGemm<Mma_, Epilogue_, ThreadblockSwizzle_, false>;
using Mma = Mma_;
using Epilogue = Epilogue_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using FusionCallbacks = typename Epilogue::FusionCallbacks;
using ParamsA = typename Mma::IteratorA::Params;
using TensorRefA = typename Mma::IteratorA::TensorRef;
using ParamsB = typename Mma::IteratorB::Params;
using TensorRefB = typename Mma::IteratorB::TensorRef;
using ParamsE = typename Mma::IteratorE::Params;
using TensorRefE = typename Mma::IteratorE::TensorRef;
static int const kSparse = Base::kSparse;
static int const kElementsPerElementE = Base::kElementsPerElementE;
using SharedStorage = typename Base::SharedStorage;
/// Parameters structure
struct Params : public SparseParamsBase<
ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB,
ParamsE, TensorRefE> {
using Base = SparseParamsBase<
ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB,
ParamsE, TensorRefE>;
//
// Data members
//
typename FusionCallbacks::Params output_op;
cute::Shape<int32_t,int32_t,int32_t> problem_shape;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Mma::IteratorE::TensorRef ref_E,
typename FusionCallbacks::Arguments output_op = typename FusionCallbacks::Arguments()
):
Base(problem_size, grid_tiled_shape, ref_A, ref_B, ref_E, Mma::Shape::kK),
output_op(FusionCallbacks::to_underlying_arguments(problem_size, output_op, nullptr /*workspace*/)),
problem_shape(problem_size.m(), problem_size.n(), 1) {
}
};
//
// Methods
//
CUTLASS_HOST_DEVICE
SparseGemmWithEpilogueVisitor() { }
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
cutlass::MatrixCoord tb_offset_E{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
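// Note (illustrative): for 2:4 structured sparsity kSparse is typically 2, so
// operand A and its metadata E span only half of the logical K extent; E is
// compressed further, with each stored metadata element describing
// kElementsPerElementE elements of the compressed A tile.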
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK;
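// Worked example (hypothetical values): with problem_size.k() == 4096,
// gemm_k_size == 1024, and Mma::Shape::kK == 64, the threadblock at
// k-partition index 1 sees problem_size_k == 2048, starts at
// tb_offset_B.row() == 1024, and therefore executes
// (2048 - 1024 + 63) / 64 == 16 mainloop iterations.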
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A, B, and E operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k / kSparse},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorE iterator_E(
params.params_E, params.ref_E.data(),
{params.problem_size.m(),
problem_size_k / kSparse / kElementsPerElementE},
thread_idx, tb_offset_E);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
if (gemm_k_iterations > 0) {
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators);
}
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
//
// Epilogue
//
Epilogue epilogue(
params.output_op,
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(accumulators, threadblock_tile_offset, params.problem_shape, thread_idx);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/sparse_gemm_with_visitor.h/0 | {
"file_path": "include/cutlass/gemm/kernel/sparse_gemm_with_visitor.h",
"repo_id": "include",
"token_count": 2891
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
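// For example (illustrative values): with ElementA = half_t (16 bits) and
// kAccessSizeInBits = 128, each access moves 8 elements, so a threadblock tile
// with Shape::kM == 128 yields min(128 / 8, 8) == 8 threads along the
// contiguous dimension and 32 / 8 == 4 threads along the strided dimension.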
//
// Shared memory layouts
//
static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)),
Shape::kM);
using SmemLayoutA =
layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, Crosswise_A>;
// Shared memory layout
static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)),
Shape::kN);
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, Crosswise_B>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
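// A typical instantiation of this specialization might look as follows
// (illustrative parameter values only; they are not taken from this file):
//
//   using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 32>,          // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,            // warp tile
//       cutlass::gemm::GemmShape<16, 8, 8>,              // instruction shape
//       cutlass::half_t, cutlass::layout::ColumnMajor,   // A
//       cutlass::half_t, cutlass::layout::RowMajor,      // B
//       float, cutlass::layout::RowMajor,                // C
//       cutlass::arch::OpClassTensorOp, 2,               // tensor ops, 2 stages
//       cutlass::arch::OpMultiplyAdd>;
//
//   // MmaCore::MmaPolicy then parameterizes the threadblock-scoped mainloop.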
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by MMA
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
1,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by MMA
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)),
Shape::kN);
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, Crosswise_B>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by MMA
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)),
Shape::kM);
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, Crosswise_A>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Below is for arch::OpMultiplyAddFastF16
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float,
layout::ColumnMajor, float, layout::RowMajor, float,
LayoutC_, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = float;
using LayoutA = layout::ColumnMajor;
using ElementB = float;
using LayoutB = layout::RowMajor;
using ElementC = float;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 256;
/// Default Operator
using Operator = arch::OpMultiplyAdd;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<half_t>::value, int(128 / sizeof(half_t))>;
// Shared memory layout
using SmemLayoutB =
layout::RowMajorTensorOpMultiplicandCongruous<sizeof_bits<half_t>::value,
int(128 / sizeof(half_t))>;
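// Note: with arch::OpMultiplyAddFastF16 the float operands are stored in
// shared memory as half_t, which is why the shared-memory layouts, tile
// iterators, and the warp-level tensor op below are expressed in half_t even
// though the public ElementA/ElementB remain float.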
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
half_t,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
half_t,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float,
layout::RowMajor, float, layout::ColumnMajor, float,
LayoutC_, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = float;
using LayoutA = layout::RowMajor;
using ElementB = float;
using LayoutB = layout::ColumnMajor;
using ElementC = float;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 256;
/// Default Operator
using Operator = arch::OpMultiplyAdd;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA =
layout::RowMajorTensorOpMultiplicandCrosswise<sizeof_bits<half_t>::value,
Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<half_t>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
half_t,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
half_t,
SmemLayoutB,
1,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float,
layout::RowMajor, float, layout::RowMajor, float,
LayoutC_, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = float;
using LayoutA = layout::RowMajor;
using ElementB = float;
using LayoutB = layout::RowMajor;
using ElementC = float;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 256;
/// Default Operator
using Operator = arch::OpMultiplyAdd;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<half_t>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<half_t>::value, int(128 / sizeof(half_t))>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
half_t,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
half_t,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float,
layout::ColumnMajor, float, layout::ColumnMajor, float,
LayoutC_, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = float;
using LayoutA = layout::ColumnMajor;
using ElementB = float;
using LayoutB = layout::ColumnMajor;
using ElementC = float;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 256;
/// Default Operator
using Operator = arch::OpMultiplyAdd;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<half_t>::value, int(128 / sizeof(half_t))>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<half_t>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>, half_t, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>, half_t, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major-interleave
/// B: row-major-interleave
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
///
/// Column/RowMajorInterleaved<InterleavedK>(m, n) is mapped to Column/RowMajor(m
/// x InterleavedK, n / InterleavedK) so that Column/RowMajor global iterators
/// can be reused. The shared store iterator is the same as the crosswise shared
/// store iterator. So, the only thing we need to do is to swap the coordinates
/// (contiguous <=> strided) used by the global iterator and the shared store
/// iterator.
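/// For example (illustrative only): with InterleavedK == 32, a logical
/// ColumnMajorInterleaved<32> operand of extent (m, n) == (256, 128) is
/// presented to the global iterator as a ColumnMajor matrix of extent
/// (256 * 32, 128 / 32) == (8192, 4); the contiguous and strided coordinates
/// are then swapped again when the tile is written to shared memory.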
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by MMA
typename Operator_,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Number of interleaved k
int InterleavedK>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajorInterleaved<InterleavedK>, ElementB_,
layout::RowMajorInterleaved<InterleavedK>, ElementC_,
LayoutC_, arch::OpClassTensorOp, 2, Operator_,
AccumulatorsInRowMajor> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementB = ElementB_;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
static int const kInterleavedK = InterleavedK;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess =
kAccessSizeInBits / sizeof_bits<ElementA>::value;
static int const kWarpThreadArrangementContiguous =
kInterleavedK / kElementsPerAccess;
static int const kWarpThreadArrangementStrided =
kWarpSize / kWarpThreadArrangementContiguous;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, kInterleavedK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, kInterleavedK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedK,
Shape::kK / kInterleavedK>,
kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMap<
IteratorThreadMapA,
layout::PitchLinearShape<kWarpThreadArrangementContiguous,
kWarpThreadArrangementStrided>>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN * kInterleavedK,
Shape::kK / kInterleavedK>,
kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>;
/// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMap<
IteratorThreadMapB,
layout::PitchLinearShape<kWarpThreadArrangementContiguous,
kWarpThreadArrangementStrided>>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK, AccumulatorsInRowMajor>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/default_mma_core_sm75.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_mma_core_sm75.h",
"repo_id": "include",
"token_count": 15728
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/threadblock/mma_sparse_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Iterates over tiles of E operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorE_,
/// Iterates over tiles of E operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorE_,
/// Cache operation for operand E
cutlass::arch::CacheOperation::Kind CacheOpE,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class SparseMmaMultistage :
public SparseMmaBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = SparseMmaBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Iterates over tiles of E operand in global memory
using IteratorE = IteratorE_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using SmemIteratorE = SmemIteratorE_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static cutlass::arch::CacheOperation::Kind const kCacheOpE = CacheOpE;
static int const kSparse = Policy::Operator::kSparse;
static int const kMetaSizeInBits = Policy::Operator::kMetaSizeInBits;
static int const kMaxID2 = Policy::Operator::kMaxID2;
static int const kElementsPerElementE =
Policy::Operator::kElementsPerElementE;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// ElementE
using ElementE = typename IteratorE::Element;
/// LayoutE
using LayoutE = typename IteratorE::Layout;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of async copies to load one stage of operand A
static int const TBLoadIterationsA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of async copies to load one stage of operand B
static int const TBLoadIterationsB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of async copies to load one stage of operand E
static int const TBLoadIterationsE =
IteratorE::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of async copies to load one group of operand A
static int const kAccessesPerGroupA =
(TBLoadIterationsA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of async copies to load one group of operand B
static int const kAccessesPerGroupB =
(TBLoadIterationsB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of async copies to load one group of operand E
static int const kAccessesPerGroupE =
(TBLoadIterationsE + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
    /// The E operand is tiny. Most of the time, not all warps are needed
    /// to load it from global memory.
static int const kValidWarps = IteratorE::ThreadMap::kThreads / 32;
    /// The B operand is twice as large as A, which creates very high register pressure.
    /// We have to sacrifice double buffering of B when the warp tile size is large.
static int const kBBufferSize =
((sizeof(typename Operator::ElementC) == 4) &&
((platform::is_same<typename Operator::Policy::Operator::ElementA,
typename Operator::ElementA>::value &&
platform::is_same<typename Operator::Policy::Operator::ElementB,
typename Operator::ElementB>::value)) &&
(Operator::Shape::kM >= 64 && Operator::Shape::kN >= 64))
? 1
: 2;
};
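  // Illustrative note (not part of the original interface): kBBufferSize drops to 1
  // (single-buffered B fragments) only when all three conditions above hold, i.e. a
  // 32-bit accumulator, no A/B fragment type conversion between the warp operand types
  // and the instruction types, and a warp tile of at least 64x64. Any other combination
  // keeps the usual double-buffered scheme (kBBufferSize == 2).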
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
using WarpFragmentE = typename Operator::FragmentE;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
/// Iterator to write threadblock-scoped tile of E operand to shared memory
SmemIteratorE smem_iterator_E_;
  /// Whether this warp participates in loading the E operand from global memory
bool is_warp_valid_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
SparseMmaMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx),
smem_iterator_E_(shared_storage.operand_E_ref(), thread_idx)
{
is_warp_valid_ = warp_idx < Detail::kValidWarps;
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
this->warp_tile_iterator_E_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
}
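  // Worked example of the warp mapping above (illustrative; assumes
  // Base::WarpCount::kM == 2 and Base::WarpCount::kN == 2):
  //
  //   warp_idx == 5  ->  warp_idx_mn = 5 % 4 = 1,   warp_idx_k = 5 / 4 = 1
  //                      warp_idx_m  = 1 % 2 = 1,   warp_idx_n  = 1 / 2 = 0
  //
  // i.e. this warp computes the (m = 1, n = 0) warp tile and works on the second
  // k-partition of the threadblock tile.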
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B,
IteratorE &iterator_E, int group_start_A = 0,
int group_start_B = 0, int group_start_E = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// async copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::TBLoadIterationsA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
}
iterator_B.set_iteration_index(group_start_B *
IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// async copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::TBLoadIterationsB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
}
iterator_E.set_iteration_index(group_start_E);
this->smem_iterator_E_.set_iteration_index(group_start_E);
// async copy for operand E
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupE; ++j) {
if (group_start_E + j < Detail::TBLoadIterationsE) {
typename IteratorE::AccessType *dst_ptr =
reinterpret_cast<typename IteratorE::AccessType *>(
this->smem_iterator_E_.get());
int const kSrcBytes = sizeof_bits<typename IteratorE::Element>::value *
IteratorE::ThreadMap::kElementsPerAccess / 8;
auto gmem_ptr = iterator_E.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpE>(
dst_ptr, gmem_ptr, iterator_E.valid() && is_warp_valid_);
++iterator_E;
++this->smem_iterator_E_;
}
}
}
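  // Sizing example for the cp.async transfers above (illustrative; assumes a 16-bit
  // element type, ThreadMap::kElementsPerAccess == 8 and kAccessesPerVector == 1):
  //
  //   kSrcBytes = 16 * 8 / 1 / 8 = 16 bytes per cp.async, i.e. a full 128-bit copy.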
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< iterator over E operand in global memory
IteratorE iterator_E,
///< initial value of accumulator
FragmentC const &src_accum) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
iterator_E.clear_mask(gemm_k_iterations == 0);
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// async copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// async copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
iterator_E.set_iteration_index(0);
this->smem_iterator_E_.set_iteration_index(0);
// async copy for operand E
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsE; ++j) {
typename IteratorE::AccessType *dst_ptr =
reinterpret_cast<typename IteratorE::AccessType *>(
this->smem_iterator_E_.get());
int const kSrcBytes = sizeof_bits<typename IteratorE::Element>::value *
IteratorE::ThreadMap::kElementsPerAccess / 8;
if (is_warp_valid_)
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpE>(
dst_ptr, iterator_E.get(), iterator_E.valid());
++iterator_E;
++this->smem_iterator_E_;
}
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
iterator_E.add_tile_offset({0, 1});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
this->smem_iterator_E_.add_tile_offset({0, 1});
// cp.async.commit_group - completes a stage
cutlass::arch::cp_async_fence();
}
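    // At this point kStages - 1 stages have been committed via cp_async_fence();
    // the cp_async_wait<kStages - 2>() below blocks until at most kStages - 2 of
    // them are still in flight, i.e. until the first stage has landed in shared
    // memory and can be consumed by the warp-level iterators.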
// Perform accumulation in the 'd' output operand
accum = src_accum;
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[Detail::kBBufferSize];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[Detail::kBBufferSize];
WarpFragmentE warp_frag_E[2];
Operator warp_mma;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_E_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
this->warp_tile_iterator_E_.load(warp_frag_E[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
++this->warp_tile_iterator_E_;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
iterator_E.clear_mask(gemm_k_iterations == 0);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
        // Load warp-level tiles from shared memory, wrapping to the k offset
        // if this is the last group in the tile.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_E_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_E_.load(warp_frag_E[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_E_;
if (Detail::kBBufferSize == 2) {
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.load(
warp_loaded_frag_B[(warp_mma_k + 1) % Detail::kBBufferSize]);
++this->warp_tile_iterator_B_;
}
if (warp_mma_k > 0)
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % Detail::kBBufferSize],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % Detail::kBBufferSize]);
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % Detail::kBBufferSize], accum,
warp_frag_E[warp_mma_k % 2]
);
if (Detail::kBBufferSize == 1) {
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_B_;
}
        // Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations - 1) {
int group_start_iteration_A, group_start_iteration_B, group_start_iteration_E;
group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
group_start_iteration_E = warp_mma_k * Detail::kAccessesPerGroupE;
copy_tiles_and_advance(
iterator_A, iterator_B, iterator_E, group_start_iteration_A,
group_start_iteration_B, group_start_iteration_E);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
int group_start_iteration_A, group_start_iteration_B, group_start_iteration_E;
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
group_start_iteration_E =
(warp_mma_k + 1) * Detail::kAccessesPerGroupE;
copy_tiles_and_advance(
iterator_A, iterator_B, iterator_E, group_start_iteration_A,
group_start_iteration_B, group_start_iteration_E);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
iterator_E.add_tile_offset({0, 1});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
this->smem_iterator_E_.add_tile_offset({0, 1});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
this->smem_iterator_E_.add_tile_offset({0, -Base::kStages});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
this->warp_tile_iterator_E_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
iterator_E.clear_mask(gemm_k_iterations == 0);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations)
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % Detail::kBBufferSize],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % Detail::kBBufferSize]);
}
}
    // Commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_sparse_multistage.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_sparse_multistage.h",
"repo_id": "include",
"token_count": 10700
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpGaussianComplexAccumulatorTileIterator;
////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Partial specialization for complex<T>
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of underlying field of reals.
typename RealElement,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpGaussianComplexAccumulatorTileIterator<
Shape_, complex<RealElement>, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = complex<RealElement>;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
};
private:
// Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
// shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
// of that row. The accumulators within one row are assumed to be consecutive.
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile. It is assumed that the accumulators
/// are stored in a gaussian complex arrangement with parts 1, 2, and 3 as entirely contiguous
/// arranged as [part1, part2, part3]
using Fragment = Array<RealElement, (Shape::kCount / kThreads) * 3>;
static int const kPart1Index = (Shape::kCount / kThreads) * 0;
static int const kPart2Index = (Shape::kCount / kThreads) * 1;
static int const kPart3Index = (Shape::kCount / kThreads) * 2;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
Element z = offset_ref.at({accum_m, accum_n});
frag[mma_accum_start + row * kElementsPerAccess + col + kPart1Index] = z.real() + z.imag();
frag[mma_accum_start + row * kElementsPerAccess + col + kPart2Index] = -z.real();
frag[mma_accum_start + row * kElementsPerAccess + col + kPart3Index] = z.imag();
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
Element z(frag[kPart1Index + idx] - frag[kPart3Index + idx],
frag[kPart1Index + idx] + frag[kPart2Index + idx]);
offset_ref.at({accum_m, accum_n}) = z;
}
}
}
}
}
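  // Consistency note (illustrative): load_with_pointer_offset() above packs a complex
  // accumulator z = (re, im) as
  //   part1 = re + im,   part2 = -re,   part3 = im
  // and store_with_pointer_offset() reconstructs
  //   real = part1 - part3 = re,   imag = part1 + part2 = im
  // so a load immediately followed by a store leaves the accumulator tile unchanged.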
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
    Fragment const &frag,                     ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
/// Stores a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h",
"repo_id": "include",
"token_count": 5020
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators to load sparse meta data used by warp-level matrix multiply operations
targeting Sparse Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class SparseMmaTensorOpMetaTileIterator {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
static int const kSparse = 2;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kColumn % InstructionShape::kColumn),
"Shape of warp-level Mma must be divisible by operator shape.");
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
// Determine number of elements along outer dimension per individual LDSM op
static int const kLdsmOpOuter = InstructionShape::kColumn;
static int const kLdsmOpInner = 8 * kElementsPerAccess / kLdsmOpOuter;
static_assert(!(Shape::kColumn % kLdsmOpOuter),
"Shape of warp-level mma must be divisible by LDSM's "
"fundamental tile size.");
static_assert(!(Shape::kRow % kLdsmOpInner),
"Shape of warp-level mma must be divisible by LDSM's "
"fundamental tile size.");
/// Shape of one individual LDSM instruction
static int const LdsmShapeColumn =
InstructionShape::kColumn / kLdsmOpOuter;
static int const LdsmShapeRow =
((4 / LdsmShapeColumn * kLdsmOpInner) > Shape::kRow)
? (Shape::kRow / kLdsmOpInner)
: (4 / LdsmShapeColumn);
using LdsmShape =
layout::PitchLinearShape<LdsmShapeRow, LdsmShapeColumn>;
/// Number and arrangement of LDSM instructions
using LdsmIterations = layout::PitchLinearShape<
Shape::kRow / kLdsmOpInner / LdsmShapeRow,
1>;
/// Number of groups for each tile
static int const kGroupsPerTile =
Shape::kColumn / InstructionShape::kColumn;
};
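  // Sizing example for Policy (illustrative; assumes a 16-bit metadata element and
  // InstructionShape::kColumn == 4):
  //
  //   kElementsPerAccess = 128 / 16 = 8 elements per 128-bit access
  //   kLdsmOpOuter       = 4,   kLdsmOpInner = 8 * 8 / 4 = 16
  //
  // Actual values depend on the Element type and instruction shape in use.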
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = Array<Element, Policy::kElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kRow * InstructionShape::kColumn / kThreads>;
private:
/// Layout object storing stride values
Index stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
/// Internal counter used to determine when to increment byte offset and when
/// to XOR it
int k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
SparseMmaTensorOpMetaTileIterator()
: pointer_(nullptr),
stride_(0),
byte_offset_(0),
k_group_idx_(0) {}
/// Constructor from TensorRef
CUTLASS_DEVICE
SparseMmaTensorOpMetaTileIterator(TensorRef const &ref, int lane_id)
: pointer_(reinterpret_cast<AccessType const *>(ref.data())),
stride_(ref.stride(0) / Policy::kElementsPerAccess),
byte_offset_(0),
k_group_idx_(0) {
int access_contiguous = (lane_id % (Shape::kRow / Policy::kElementsPerAccess));
int access_strided = (lane_id / (Shape::kRow / Policy::kElementsPerAccess));
byte_offset_ = (access_contiguous + access_strided * stride_) *
sizeof_bits<Element>::value * Policy::kElementsPerAccess / 8;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
SparseMmaTensorOpMetaTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof_bits<Element>::value / 8;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
SparseMmaTensorOpMetaTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
int offset = tile_offset.row() * Shape::kRow +
tile_offset.column() * InstructionShape::kColumn * stride_ *
Policy::kElementsPerAccess;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
SparseMmaTensorOpMetaTileIterator &operator++() {
add_tile_offset({0, 1});
if (kPartitionsK > 1) {
++k_group_idx_;
// Jump to next stage
if (k_group_idx_ == Policy::kGroupsPerTile) {
k_group_idx_ = 0;
add_tile_offset(
{0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)});
}
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
SparseMmaTensorOpMetaTileIterator &operator--(){
    byte_offset_ -= stride_ * InstructionShape::kColumn *
                    sizeof_bits<Element>::value * Policy::kElementsPerAccess /
                    8;
    return *this;
  }
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE SparseMmaTensorOpMetaTileIterator &
operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
SparseMmaTensorOpMetaTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_byte_offset(frag, 0); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr =
reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsmIterations::kContiguous;
AccessType const *source_ptr =
pointer_ +
Policy::LdsmShape::kContiguous * Policy::kLdsmOpInner * c +
Policy::LdsmShape::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) +
byte_offset + byte_offset_;
cutlass::arch::ldsm<layout::RowMajor, Policy::LdsmShape::kCount>(
fetch_ptr[access_idx], source_byte_ptr);
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kRow / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kColumn * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no op
}
};
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h",
"repo_id": "include",
"token_count": 4473
} | 39 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by GEMM+permute path for common tensor or matrix formats.
Like Layout functions, permute layout functions map logical coordinates to linear memory. They often require additional
data to describe strides between elements.
Permute layout functions must implement all members in the interface of NoPermute<> defined in this file. Address offset
computation lies in operator() with private member variables {col_permute_, row_permute_ and stride_} as new addresses after permute op.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include "assert.h"
#endif
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/coord.h"
#include "cutlass/tensor_coord.h"
namespace cutlass {
namespace layout {
// template<PermuteTag, typename Layout, bool Inverse>
// struct PermuteSelect {
// // Try to give a reasonable error message to the user
// static_assert(!platform::is_same<Permute, Permute>::value, // aka always_false<T>
// "You've tried to use a layout permutation for which the implementation is not availble. "
// "In order to provide an implementation for a particular combination of matrix layout "
// "and direction (direct/inverse), please specialize PermuteSelect trait.");
// };
// Base template for defining specializations of permutation inverses
template<typename Permute>
struct InversePermute
{
// Try to give a reasonable error message to the user
static_assert(!platform::is_same<Permute, Permute>::value, // aka always_false<T>
"To apply permutation to a GEMM input operand (A or B), an inverse permutation for the desired "
"permute class must be defined and enabled by specializing cutlass::layout::InversePermute trait.");
};
class PermuteBase {
public:
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
};
class NoPermute : public PermuteBase {
public:
//
// Methods
//
/// Constructor from matrix extent
CUTLASS_HOST_DEVICE
NoPermute(MatrixCoord extent, Index stride) { };
/// Constructor from pitch-linear extent
CUTLASS_HOST_DEVICE
NoPermute(PitchLinearCoord extent, Index stride) { };
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const { return 0; } // not correct but should never be called
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const { return 0; } // not correct but should never be called
};
template<>
struct InversePermute<NoPermute> {
using type = NoPermute;
};
/// Helper trait to detect whether a permute operation is a no-op
template<typename Permute>
inline bool constexpr is_trivial_permute = platform::is_same<Permute, cutlass::layout::NoPermute>::value;
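// Usage sketch (illustrative only; the template arguments 16 and 8 are arbitrary
// example values). As stated in the InversePermute trait above, permuting a GEMM
// input operand (A or B) requires the inverse of the requested permutation:
//
//   using PermuteD = cutlass::layout::Tensor4DPermute0213RowMajor<16, 8>;
//   using PermuteA = typename cutlass::layout::InversePermute<PermuteD>::type;
//   static_assert(!cutlass::layout::is_trivial_permute<PermuteD>, "non-trivial permute");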
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Defines permute layouts of various tensor formats.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Tensor4DPermute0213
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permute layout function for 4-D permuted tensors with matrix (dimensions [M, N]) reshaped
/// as [M/D1, D1, D2, N/D2]. Then perform permute([0, 2, 1, 3]) on the corresponding tensor.
template <int D1, int D2>
class Tensor4DPermute0213RowMajor : public PermuteBase {
private:
//
// Data members
//
Index D3_;
Index stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermute0213RowMajor(MatrixCoord extent, Index stride) {
assert(extent.row() % D1 == 0);
assert(extent.column() % D2 == 0);
D3_ = extent.column() / D2;
stride_ = stride * D1 / D2;
}
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermute0213RowMajor(PitchLinearCoord extent, Index stride)
: Tensor4DPermute0213RowMajor(MatrixCoord(extent.strided(), extent.contiguous()), stride) {}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const {
// [i,j,k,l] -> [i,k,j,l]
Index l = coord.column() % D3_;
Index k = coord.column() / D3_;
Index j = coord.row() % D1;
Index i = coord.row() / D1;
MatrixCoord permuted{k + i * D2, l + j * D3_};
return LongIndex(permuted.row()) * LongIndex(stride_) + LongIndex(permuted.column());
}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return operator()(MatrixCoord(coord.strided(), coord.contiguous()));
}
};
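// Worked example for Tensor4DPermute0213RowMajor (illustrative): with D1 = 2, D2 = 2,
// extent = {4, 4} and stride = 4, the constructor yields D3_ = 2 and stride_ = 4.
// The logical coordinate (row = 1, column = 2) decomposes as i = 0, j = 1, k = 1, l = 0,
// is permuted to {k + i * D2, l + j * D3_} = {1, 2}, and maps to linear offset
// 1 * 4 + 2 = 6.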
// Inverse for Tensor4DPermute0213 can be implemented by simply swapping D1 and D2
template <int D1, int D2>
class Tensor4DPermute0213RowMajorInverse : public Tensor4DPermute0213RowMajor<D2, D1> {
public:
using Base = Tensor4DPermute0213RowMajor<D2, D1>;
using Base::Base;
};
template<int D1, int D2>
struct InversePermute<Tensor4DPermute0213RowMajor<D1, D2>> {
using type = Tensor4DPermute0213RowMajorInverse<D1, D2>;
};
template<int D1, int D2>
struct InversePermute<Tensor4DPermute0213RowMajorInverse<D1, D2>> {
using type = Tensor4DPermute0213RowMajor<D1, D2>;
};
/// Permute layout function for 4-D permuted tensors with matrix (dimensions [M, N]) reshaped
/// as [M/D1, D1, D2, N/D2]. Then perform permute([0, 2, 1, 3]) on the corresponding tensor.
template <int D1, int D2>
class Tensor4DPermute0213ColumnMajor : public PermuteBase {
private:
//
// Data members
//
Index D0_;
Index stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermute0213ColumnMajor(MatrixCoord extent, Index stride) {
assert(extent.row() % D1 == 0);
assert(extent.column() % D2 == 0);
D0_ = extent.row() / D1;
stride_ = stride * D2 / D1;
}
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermute0213ColumnMajor(PitchLinearCoord extent, Index stride)
: Tensor4DPermute0213ColumnMajor(MatrixCoord(extent.contiguous(), extent.strided()), stride) {}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const {
// [i,j,k,l] -> [i,k,j,l]
Index l = coord.column() / D2;
Index k = coord.column() % D2;
Index j = coord.row() / D0_;
Index i = coord.row() % D0_;
MatrixCoord permuted{i + k * D0_, j + l * D1};
return LongIndex(permuted.row()) + LongIndex(permuted.column()) * LongIndex(stride_);
}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return operator()(MatrixCoord(coord.contiguous(), coord.strided()));
}
};
// Inverse for Tensor4DPermute0213 can be implemented by simply swapping D1 and D2
template <int D1, int D2>
class Tensor4DPermute0213ColumnMajorInverse : public Tensor4DPermute0213ColumnMajor<D2, D1> {
public:
using Base = Tensor4DPermute0213ColumnMajor<D2, D1>;
using Base::Base;
};
template<int D1, int D2>
struct InversePermute<Tensor4DPermute0213ColumnMajor<D1, D2>> {
using type = Tensor4DPermute0213ColumnMajorInverse<D1, D2>;
};
template<int D1, int D2>
struct InversePermute<Tensor4DPermute0213ColumnMajorInverse<D1, D2>> {
using type = Tensor4DPermute0213ColumnMajor<D1, D2>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Tensor4DPermuteBMM0213
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permute layout function for 4-D permuted tensors for BMM with BMM tensor (dimensions [B, M, N]) reshaped
/// as [B/D1, D1, M, N]. Then perform permute([0, 2, 1, 3]) on the corresponding whole BMM tensor.
template <int D1>
class Tensor4DPermuteBMM0213RowMajor : public PermuteBase {
private:
//
// Data members
//
Index D3_;
Index stride_;
Index batch_stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermuteBMM0213RowMajor(MatrixCoord extent, Index stride) {
Index D2 = extent.row();
D3_ = extent.column();
stride_ = stride * D1;
batch_stride_ = D2 * stride_;
}
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermuteBMM0213RowMajor(PitchLinearCoord extent, Index stride)
: Tensor4DPermuteBMM0213RowMajor(MatrixCoord(extent.strided(), extent.contiguous()), stride) {}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const {
// The batch index for BMM
Index BMM_batch_idx = blockIdx.z;
// [i,j,k,l] -> [i,k,j,l]
Index l = coord.column();
Index k = coord.row();
Index j = BMM_batch_idx % D1;
Index i = BMM_batch_idx / D1;
Index pbatch = i;
MatrixCoord pcoord{k, l + j * D3_};
return pbatch * LongIndex(batch_stride_) + pcoord.row() * LongIndex(stride_) + pcoord.column();
}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return operator()(MatrixCoord(coord.strided(), coord.contiguous()));
}
};
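// Shape note (illustrative): after this permutation the output can be viewed as a
// batched row-major matrix with B / D1 batches of size M x (D1 * N). The D1 sub-batches
// selected by (blockIdx.z % D1) are laid out side by side along the column dimension,
// which is why the permuted leading dimension is stride * D1 and the batch stride is
// M * stride * D1.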
template <int D1>
class Tensor4DPermuteBMM0213RowMajorInverse : public PermuteBase {
private:
//
// Data members
//
Index D3_;
Index stride_;
Index batch_stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermuteBMM0213RowMajorInverse(MatrixCoord extent, Index stride) {
assert(extent.column() % D1 == 0);
Index D2 = extent.row();
D3_ = extent.column() / D1;
stride_ = stride / D1;
batch_stride_ = D2 * stride_;
}
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermuteBMM0213RowMajorInverse(PitchLinearCoord extent, Index stride)
: Tensor4DPermuteBMM0213RowMajorInverse(MatrixCoord(extent.strided(), extent.contiguous()), stride) {}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const {
// The batch index for BMM
Index BMM_batch_idx = blockIdx.z;
// The following assumes grouping [(D0)->batch, (D2)->row, (D1,D3)->col]
Index l = coord.column() % D3_;
Index j = coord.column() / D3_;
Index k = coord.row();
Index i = BMM_batch_idx;
// compute original [batch, row, col] index
Index pbatch = j + i * D1;
MatrixCoord pcoord{k, l};
return pbatch * LongIndex(batch_stride_) + pcoord.row() * LongIndex(stride_) + pcoord.column();
}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return operator()(MatrixCoord(coord.strided(), coord.contiguous()));
}
};
template<int D1>
struct InversePermute<Tensor4DPermuteBMM0213RowMajor<D1>> {
using type = Tensor4DPermuteBMM0213RowMajorInverse<D1>;
};
template<int D1>
struct InversePermute<Tensor4DPermuteBMM0213RowMajorInverse<D1>> {
using type = Tensor4DPermuteBMM0213RowMajor<D1>;
};
/// Permute layout function for 4-D permuted tensors for BMM with BMM tensor (dimensions [B, M, N]) reshaped
/// as [B/D1, D1, M, N]. Then perform permute([0, 3, 2, 1]) on the corresponding whole BMM tensor.
template <int D1>
class Tensor4DPermuteBMM0321ColumnMajor : public PermuteBase {
private:
//
// Data members
//
Index D2_;
Index stride_;
Index batch_stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermuteBMM0321ColumnMajor(MatrixCoord extent, Index stride) {
D2_ = extent.row();
Index D3 = extent.column();
stride_ = stride * D1;
batch_stride_ = stride_ * D3;
}
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermuteBMM0321ColumnMajor(PitchLinearCoord extent, Index stride)
: Tensor4DPermuteBMM0321ColumnMajor(MatrixCoord(extent.contiguous(), extent.strided()), stride) {}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const {
Index BMM_batch_idx = blockIdx.z;
// [i,j,k,l] -> [i,k,j,l]
Index l = coord.column();
Index k = coord.row();
Index j = BMM_batch_idx % D1;
Index i = BMM_batch_idx / D1;
Index pbatch = i;
MatrixCoord pcoord{k + j * D2_, l};
return pbatch * LongIndex(batch_stride_) + pcoord.row() + pcoord.column() * LongIndex(stride_);
}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return operator()(MatrixCoord(coord.contiguous(), coord.strided()));
}
};
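// Shape note (illustrative): after this permutation the output can be viewed as
// B / D1 batches of (D1 * M) x N column-major matrices. The D1 sub-batches selected by
// (blockIdx.z % D1) are stacked along the row dimension, so the permuted leading
// dimension is stride * D1 and the batch stride is stride * D1 * N.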
template <int D1>
class Tensor4DPermuteBMM0321ColumnMajorInverse : public PermuteBase {
private:
//
// Data members
//
Index D2_;
Index stride_;
Index batch_stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermuteBMM0321ColumnMajorInverse(MatrixCoord extent, Index stride) {
assert(extent.row() % D1 == 0);
D2_ = extent.row() / D1;
Index D3 = extent.column();
stride_ = stride / D1;
batch_stride_ = stride_ * D3;
}
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermuteBMM0321ColumnMajorInverse(PitchLinearCoord extent, Index stride)
: Tensor4DPermuteBMM0321ColumnMajorInverse(MatrixCoord(extent.contiguous(), extent.strided()), stride) {}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const {
Index BMM_batch_idx = blockIdx.z;
// The following assumes grouping [(D0)->batch, (D1,D2)->row, (D3)->col]
Index l = coord.column();
Index k = coord.row() % D2_;
Index j = coord.row() / D2_;
Index i = BMM_batch_idx;
Index pbatch = i * D1 + j;
MatrixCoord pcoord{k, l};
return pbatch * LongIndex(batch_stride_) + pcoord.row() + pcoord.column() * LongIndex(stride_);
}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return operator()(MatrixCoord(coord.contiguous(), coord.strided()));
}
};
template<int D1>
struct InversePermute<Tensor4DPermuteBMM0321ColumnMajor<D1>> {
using type = Tensor4DPermuteBMM0321ColumnMajorInverse<D1>;
};
template<int D1>
struct InversePermute<Tensor4DPermuteBMM0321ColumnMajorInverse<D1>> {
using type = Tensor4DPermuteBMM0321ColumnMajor<D1>;
};
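// Illustrative sketch (not part of the library): how one of these permute functors is evaluated.
// It is constructed from the per-batch matrix extent and leading dimension, and maps a logical
// (row, column) coordinate of the BMM output to a linear element offset in the permuted tensor.
// The batch index is read from blockIdx.z inside operator(), so this is device-only code; the
// names `permuted_offset`, `ldm` and the choice D1 = 8 are hypothetical.
//
//   using Permute = cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<8>;   // D1 = 8
//
//   CUTLASS_DEVICE
//   int64_t permuted_offset(cutlass::MatrixCoord extent, int ldm, cutlass::MatrixCoord coord) {
//     Permute permute(extent, ldm);   // extent = [M, N] of one batch, ldm = leading dimension
//     return permute(coord);          // element offset of `coord` after the BMM0321 permutation
//   }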
/////////////////////////////////////////////////////////////////////////////////////////////////
// Tensor5DPermute20314
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permute layout function for 5-D permuted tensors with the output matrix (dimensions [M, N]) reshaped
/// as [M/T1, T1, T2, T3, N/T2/T3]. Then perform permute([2, 0, 3, 1, 4]) on the corresponding output tensor.
template <int T1, int T2, int T3>
class Tensor5DPermute20314RowMajor : public PermuteBase {
private:
//
// Data members
//
Index T0_;
Index T4_;
Index stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor5DPermute20314RowMajor(MatrixCoord extent, Index stride) {
assert(extent.row() % T1 == 0);
assert(extent.column() % (T2 * T3) == 0);
T0_ = extent.row() / T1;
T4_ = extent.column() / (T2 * T3);
    // Compute the leading dimension (stride) of the permuted tensor
stride_ = stride / T2 * T1; // stride in Elements
}
/// Constructor
CUTLASS_HOST_DEVICE
Tensor5DPermute20314RowMajor(PitchLinearCoord extent, Index stride)
: Tensor5DPermute20314RowMajor(MatrixCoord(extent.strided(), extent.contiguous()), stride) {}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const {
    // Permute as X1 = torch.permute(X, [2, 0, 3, 1, 4]) -> 5D tensor indices [i,j,k,l,m]. The dimensions
    // of X are [T0, T1, T2, T3, T4]; after permutation the dimensions of X1 are [T2, T0, T3, T1, T4].
Index m = coord.column() % T4_;
Index l = (coord.column() / T4_) % T3;
Index k = (coord.column() / T4_) / T3;
Index j = coord.row() % T1;
Index i = coord.row() / T1;
MatrixCoord permuted{i + k * T0_, m + j * T4_ + l * T1 * T4_};
return LongIndex(permuted.row()) * LongIndex(stride_) + LongIndex(permuted.column());
}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return operator()(MatrixCoord(coord.strided(), coord.contiguous()));
}
};
/// Inverse for Tensor5DPermute20314 (could also be given a proper name, e.g. Tensor5DPermute13024).
template <int T1, int T2, int T3>
class Tensor5DPermute20314RowMajorInverse : public PermuteBase {
private:
//
// Data members
//
Index T0_;
Index T4_;
// Permuted stride in units of elements
Index stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor5DPermute20314RowMajorInverse(MatrixCoord extent, Index stride) {
assert(extent.row() % T2 == 0);
assert(extent.column() % (T1 * T3) == 0);
T0_ = extent.row() / T2;
T4_ = extent.column() / (T1 * T3);
stride_ = stride / T1 * T2;
}
/// Constructor
CUTLASS_HOST_DEVICE
Tensor5DPermute20314RowMajorInverse(PitchLinearCoord extent, Index stride)
: Tensor5DPermute20314RowMajorInverse(MatrixCoord(extent.strided(), extent.contiguous()), stride) {}
/// Computes the offset after the inverse of permute operation in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const {
Index m = coord.column() % T4_;
Index j = (coord.column() / T4_) % T1;
Index l = (coord.column() / T4_) / T1;
Index i = coord.row() % T0_;
Index k = coord.row() / T0_;
MatrixCoord permuted{j + i * T1, m + l * T4_ + k * T3 * T4_};
return LongIndex(permuted.row()) * LongIndex(stride_) + LongIndex(permuted.column());
}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return operator()(MatrixCoord(coord.strided(), coord.contiguous()));
}
};
template<int T1, int T2, int T3>
struct InversePermute<Tensor5DPermute20314RowMajor<T1, T2, T3>> {
using type = Tensor5DPermute20314RowMajorInverse<T1, T2, T3>;
};
template<int T1, int T2, int T3>
struct InversePermute<Tensor5DPermute20314RowMajorInverse<T1, T2, T3>> {
using type = Tensor5DPermute20314RowMajor<T1, T2, T3>;
};
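// Illustrative host reference (assumption, not library code) for the mapping realized by
// Tensor5DPermute20314RowMajor with a tight leading dimension (stride == N). An M x N row-major
// matrix is viewed as [T0, T1, T2, T3, T4] with T0 = M/T1 and T4 = N/(T2*T3); element
// [i, j, k, l, m] of the source lands at [k, i, l, j, m] of the permuted tensor. The names
// `source` and `destination` are hypothetical buffers of M * N elements.
//
//   for (int i = 0; i < T0; ++i)
//   for (int j = 0; j < T1; ++j)
//   for (int k = 0; k < T2; ++k)
//   for (int l = 0; l < T3; ++l)
//   for (int m = 0; m < T4; ++m) {
//     int64_t src = (((int64_t(i) * T1 + j) * T2 + k) * T3 + l) * T4 + m;  // row-major [T0,T1,T2,T3,T4]
//     int64_t dst = (((int64_t(k) * T0 + i) * T3 + l) * T1 + j) * T4 + m;  // row-major [T2,T0,T3,T1,T4]
//     destination[dst] = source[src];
//   }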
/////////////////////////////////////////////////////////////////////////////////////////////////
// Tensor5DPermute02413
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permute layout function for 5-D permuted tensors with matrix (dimensions [M, N]) reshaped
/// as [M/T1, T1, T2, T3, N/T2/T3]. Then perform permute([0, 2, 4, 1, 3]) on the corresponding tensor.
template <int T1, int T2, int T3>
class Tensor5DPermute02413ColumnMajor : public PermuteBase {
private:
//
// Data members
//
Index T0_;
Index T4_;
Index stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor5DPermute02413ColumnMajor(MatrixCoord extent, Index stride) {
assert(extent.row() % T1 == 0);
assert(extent.column() % (T2 * T3) == 0);
T0_ = extent.row() / T1;
T4_ = extent.column() / (T2 * T3);
    // Compute the leading dimension (stride) of the permuted tensor
stride_ = stride / T1 * T2; // stride in Elements
}
/// Constructor
CUTLASS_HOST_DEVICE
Tensor5DPermute02413ColumnMajor(PitchLinearCoord extent, Index stride)
: Tensor5DPermute02413ColumnMajor(MatrixCoord(extent.contiguous(), extent.strided()), stride) {}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const {
    // Permute as X1 = torch.permute(X, [0, 2, 4, 1, 3]) -> 5D tensor indices [i,j,k,l,m]. The dimensions
    // of X are [T0, T1, T2, T3, T4]; after permutation the dimensions of X1 are [T0, T2, T4, T1, T3].
Index m = (coord.column() / T2) / T3;
Index l = (coord.column() / T2) % T3;
Index k = coord.column() % T2;
Index j = coord.row() / T0_;
Index i = coord.row() % T0_;
MatrixCoord permuted{i + k * T0_, m + j * T4_ + l * T4_ * T1};
return LongIndex(permuted.row()) + LongIndex(permuted.column()) * LongIndex(stride_);
}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return operator()(MatrixCoord(coord.contiguous(), coord.strided()));
}
};
/// Inverse for Tensor5DPermute02413ColumnMajor
template <int T1, int T2, int T3>
class Tensor5DPermute02413ColumnMajorInverse : public PermuteBase {
private:
//
// Data members
//
Index T0_;
Index T4_;
// Permuted stride in units of elements
Index stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor5DPermute02413ColumnMajorInverse(MatrixCoord extent, Index stride) {
assert(extent.row() % T2 == 0);
assert(extent.column() % (T1 * T3) == 0);
T0_ = extent.row() / T2;
T4_ = extent.column() / (T1 * T3);
stride_ = stride / T2 * T1;
}
/// Constructor
CUTLASS_HOST_DEVICE
Tensor5DPermute02413ColumnMajorInverse(PitchLinearCoord extent, Index stride)
: Tensor5DPermute02413ColumnMajorInverse(MatrixCoord(extent.contiguous(), extent.strided()), stride) {}
/// Computes the offset after the inverse of permute operation in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord coord) const {
Index m = coord.column() % T4_;
Index j = (coord.column() / T4_) % T1;
Index l = (coord.column() / T4_) / T1;
Index i = coord.row() % T0_;
Index k = coord.row() / T0_;
MatrixCoord permuted{i + j * T0_, k + l * T2 + m * T2 * T3};
return LongIndex(permuted.row()) + LongIndex(permuted.column()) * LongIndex(stride_);
}
/// Computes the offset after Permute Op in logical elements
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return operator()(MatrixCoord(coord.contiguous(), coord.strided()));
}
};
template<int T1, int T2, int T3>
struct InversePermute<Tensor5DPermute02413ColumnMajor<T1, T2, T3>> {
using type = Tensor5DPermute02413ColumnMajorInverse<T1, T2, T3>;
};
template<int T1, int T2, int T3>
struct InversePermute<Tensor5DPermute02413ColumnMajorInverse<T1, T2, T3>> {
using type = Tensor5DPermute02413ColumnMajor<T1, T2, T3>;
};
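// Illustrative note (assumption): InversePermute is a metafunction pairing each permute layout
// with the layout that undoes it; kernels typically use the forward type when scattering a
// permuted output and the inverse type when gathering a permuted input operand.
//
//   using Forward = cutlass::layout::Tensor5DPermute02413ColumnMajor<16, 4, 8>;
//   using Inverse = typename cutlass::layout::InversePermute<Forward>::type;
//   static_assert(cutlass::platform::is_same<
//       Inverse, cutlass::layout::Tensor5DPermute02413ColumnMajorInverse<16, 4, 8>>::value, "");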
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
| include/cutlass/layout/permute.h/0 | {
"file_path": "include/cutlass/layout/permute.h",
"repo_id": "include",
"token_count": 8996
} | 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/**
* \file
* \brief C++ features that may be otherwise unimplemented for CUDA device functions.
*
* This file has three components:
*
* (1) Macros:
* - Empty macro defines for C++ keywords not supported by the current
* version of C++. These simply allow compilation to proceed (but do
* not provide the added semantics).
* - \p noexcept
* - \p constexpr
* - \p nullptr
* - \p static_assert
*
* - Macro functions that we need in constant expressions because the
* C++ equivalents require constexpr compiler support. These are
* prefixed with \p __NV_STD_*
* - \p __NV_STD_MAX
* - \p __NV_STD_MIN
*
* (2) Re-implementations of STL functions and types:
* - C++ features that need the \p __device__ annotation. These are
* placed into the \p platform namespace.
* - \p abs
* - \p plus
* - \p less
* - \p greater
* - \p min
* - \p max
* - \p methods on std::pair (==, !=, <, <=, >, >=, and make_pair())
*
* (3) Stop-gap implementations of unsupported STL functions and types:
* - STL functions and types defined by C++ 11/14/17/etc. that are not
* provided by the current version of C++. These are placed into the
* \p platform namespace
* - \p integral_constant
* - \p nullptr_t
* - \p true_type
* - \p false_type
* - \p bool_constant
* - \p enable_if
* - \p conditional
* - \p is_same
* - \p is_base_of
* - \p remove_const
* - \p remove_volatile
* - \p remove_cv
* - \p is_volatile
* - \p is_pointer
* - \p is_void
* - \p is_integral
* - \p is_floating_point
* - \p is_arithmetic
* - \p is_fundamental
* - \p is_trivially_copyable
* - \p alignment_of
* - \p aligned_storage
*
* The idea is that, as we drop support for older compilers, we can simply #define
* the \p __NV_STD_XYZ macros and \p platform namespace to alias their C++
* counterparts (or trivially find-and-replace their occurrences in code text).
*/
//-----------------------------------------------------------------------------
// Dependencies
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__)
#include <cuda/std/type_traits>
#include <cuda/std/utility>
#include <cuda/std/cstddef>
#include <cuda/std/cstdint>
#include <cuda/std/limits>
#else
#include <stdint.h>
#endif
#if !defined(__CUDACC_RTC__)
//-----------------------------------------------------------------------------
// Include STL files that platform provides functionality for
//-----------------------------------------------------------------------------
#include <algorithm> // Minimum/maximum operations
#include <cstddef> // nullptr_t
#include <functional> // Arithmetic operations
#include <utility> // For methods on std::pair
#include <limits> // float_round_style, float_denorm_style
#if (!defined(_MSC_VER) && (__cplusplus >= 201103L)) || (defined(_MSC_VER) && (_MSC_VER >= 1500))
#include <type_traits> // For integral constants, conditional metaprogramming, and type traits
#endif
#include <vector_types.h>
#include <cutlass/cutlass.h>
#endif
//-----------------------------------------------------------------------------
// OS
//-----------------------------------------------------------------------------
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__)
#define CUTLASS_OS_WINDOWS
#endif
/******************************************************************************
* Macros
******************************************************************************/
/// std
#if !defined(CUTLASS_STL_NAMESPACE)
#if defined(__CUDACC_RTC__)
#define CUTLASS_STL_NAMESPACE cuda::std
#else
#define CUTLASS_STL_NAMESPACE std
#endif
#endif
/// builtin_unreachable
#if !defined(CUTLASS_GCC_UNREACHABLE)
# if defined(__GNUC__)
# define CUTLASS_GCC_UNREACHABLE __builtin_unreachable()
# else
# define CUTLASS_GCC_UNREACHABLE
# endif
#endif
//-----------------------------------------------------------------------------
// Keywords
//-----------------------------------------------------------------------------
/// noexcept, constexpr
#if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1900))
#ifndef noexcept
#define noexcept
#endif
#ifndef constexpr
#define constexpr
#endif
#endif
/// nullptr
#if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1310))
#ifndef nullptr
#define nullptr 0
#endif
#endif
/// static_assert
#if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1600))
#ifndef static_assert
#define __platform_cat_(a, b) a##b
#define __platform_cat(a, b) __platform_cat_(a, b)
#define static_assert(__e, __m) typedef int __platform_cat(AsSeRt, __LINE__)[(__e) ? 1 : -1]
#endif
#endif
//-----------------------------------------------------------------------------
// Functions
//-----------------------------------------------------------------------------
/// Select maximum(a, b)
#ifndef __NV_STD_MAX
#define __NV_STD_MAX(a, b) (((b) > (a)) ? (b) : (a))
#endif
/// Select minimum(a, b)
#ifndef __NV_STD_MIN
#define __NV_STD_MIN(a, b) (((b) < (a)) ? (b) : (a))
#endif
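// Illustrative example (assumption): these macros remain usable in constant expressions on
// toolchains without constexpr support, e.g. when sizing a static buffer from two template
// parameters. `Storage` is a hypothetical name.
//
//   template <int A, int B>
//   struct Storage {
//     char buffer[__NV_STD_MAX(A, B)];
//   };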
/******************************************************************************
* Re-implementations
******************************************************************************/
namespace cutlass {
namespace platform {
//-----------------------------------------------------------------------------
// Abs operations <algorithm>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__)
/// std::abs
CUTLASS_HOST_DEVICE constexpr int abs(int a) {
return (a < 0) ? -a : a;
}
CUTLASS_HOST_DEVICE constexpr long long abs(long long a) {
return (a < 0) ? -a : a;
}
#else
using std::abs;
#endif
//-----------------------------------------------------------------------------
// Minimum/maximum operations <algorithm>
//-----------------------------------------------------------------------------
/// std::min
template <typename T>
CUTLASS_HOST_DEVICE constexpr const T& min(const T& a, const T& b) {
return (b < a) ? b : a;
}
/// std::max
template <typename T>
CUTLASS_HOST_DEVICE constexpr const T& max(const T& a, const T& b) {
return (a < b) ? b : a;
}
#if !defined(__CUDACC_RTC__)
//-----------------------------------------------------------------------------
// Methods on std::pair
//-----------------------------------------------------------------------------
using std::pair;
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator==(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
return (lhs.first == rhs.first) && (lhs.second == rhs.second);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator!=(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
  return (lhs.first != rhs.first) || (lhs.second != rhs.second);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator<(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
return (lhs.first < rhs.first) ? true : (rhs.first < lhs.first) ? false
: (lhs.second < rhs.second);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator<=(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
return !(rhs < lhs);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator>(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
return (rhs < lhs);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator>=(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
return !(lhs < rhs);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE std::pair<T1, T2> make_pair(T1 t, T2 u) {
std::pair<T1, T2> retval;
retval.first = t;
retval.second = u;
return retval;
}
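// Illustrative usage (assumption, host-side): platform::pair aliases std::pair, so the helpers
// above behave like their std:: counterparts.
//
//   cutlass::platform::pair<int, float> p = cutlass::platform::make_pair(3, 4.0f);
//   cutlass::platform::pair<int, float> q = cutlass::platform::make_pair(3, 5.0f);
//   bool ordered = (p < q);   // true: the first elements compare equal and 4.0f < 5.0f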
#endif
} // namespace platform
/******************************************************************************
* Implementations of C++ 11/14/17/... STL features
******************************************************************************/
namespace platform {
//-----------------------------------------------------------------------------
// Integral constant helper types <type_traits>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500))
/// std::integral_constant
template <typename value_t, value_t V>
struct integral_constant;
/// std::integral_constant
template <typename value_t, value_t V>
struct integral_constant {
static const value_t value = V;
typedef value_t value_type;
typedef integral_constant<value_t, V> type;
CUTLASS_HOST_DEVICE operator value_type() const { return value; }
CUTLASS_HOST_DEVICE const value_type operator()() const { return value; }
};
#else
using std::integral_constant;
using std::pair;
#endif
/// The type used as a compile-time boolean with true value.
typedef integral_constant<bool, true> true_type;
/// The type used as a compile-time boolean with false value.
typedef integral_constant<bool, false> false_type;
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus <= 201402L)) || (defined(_MSC_VER) && (_MSC_VER < 1900))
/// std::bool_constant
template <bool V>
struct bool_constant : platform::integral_constant<bool, V> {};
#else
using std::bool_constant;
#endif
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1700))
/// std::nullptr_t
struct nullptr_t {};
#else
using std::nullptr_t;
#endif
//-----------------------------------------------------------------------------
// Conditional metaprogramming <type_traits>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201700L)) || (defined(_MSC_VER) && (_MSC_VER < 1600))
/// std::enable_if (true specialization)
template <bool C, typename T = void>
struct enable_if {
typedef T type;
};
/// std::enable_if (false specialization)
template <typename T>
struct enable_if<false, T> {};
/// std::conditional (true specialization)
template <bool B, class T, class F>
struct conditional {
typedef T type;
};
/// std::conditional (false specialization)
template <class T, class F>
struct conditional<false, T, F> {
typedef F type;
};
template <class...>
using void_t = void;
#else
using std::enable_if;
using std::conditional;
using std::void_t;
#endif
#if (201703L <=__cplusplus)
/// std::conditional_t
using CUTLASS_STL_NAMESPACE::conditional_t;
#endif
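// Illustrative example (assumption): a typical compile-time type selection built from the traits
// in this namespace, choosing a wider accumulator for half-precision elements. `DefaultAccumulator`
// is a hypothetical name, not a CUTLASS type.
//
//   template <typename Element>
//   struct DefaultAccumulator {
//     using type = typename cutlass::platform::conditional<
//         cutlass::platform::is_same<Element, cutlass::half_t>::value, float, Element>::type;
//   };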
//-----------------------------------------------------------------------------
// Const/volatility specifiers <type_traits>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201703L)) || (defined(_MSC_VER) && (_MSC_VER < 1500))
/// std::remove_const (non-const specialization)
template <typename T>
struct remove_const {
typedef T type;
};
/// std::remove_const (const specialization)
template <typename T>
struct remove_const<const T> {
typedef T type;
};
/// std::remove_volatile (non-volatile specialization)
template <typename T>
struct remove_volatile {
typedef T type;
};
/// std::remove_volatile (volatile specialization)
template <typename T>
struct remove_volatile<volatile T> {
typedef T type;
};
/// std::remove_cv
template <typename T>
struct remove_cv {
typedef typename remove_volatile<typename remove_const<T>::type>::type type;
};
#else
using std::remove_const;
using std::remove_volatile;
using std::remove_cv;
#endif
#if (201703L <=__cplusplus)
/// std::remove_cv_t
using CUTLASS_STL_NAMESPACE::remove_cv_t;
/// std::remove_reference_t
using CUTLASS_STL_NAMESPACE::remove_reference_t;
// C++20
// using std::remove_cvref;
template <class T>
struct remove_cvref {
using type = remove_cv_t<remove_reference_t<T>>;
};
// C++20
// using std::remove_cvref_t;
template <class T>
using remove_cvref_t = typename remove_cvref<T>::type;
#endif
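// Illustrative checks (assumption): behaviour of the cv/reference-stripping helpers above.
//
//   static_assert(cutlass::platform::is_same<
//       cutlass::platform::remove_cv<int const volatile>::type, int>::value, "remove_cv");
//   static_assert(cutlass::platform::is_same<
//       cutlass::platform::remove_cvref_t<int const &>, int>::value, "remove_cvref_t (C++17 builds)");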
//-----------------------------------------------------------------------------
// Type relationships <type_traits>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500))
/// std::is_same (false specialization)
template <typename A, typename B>
struct is_same : false_type {};
/// std::is_same (true specialization)
template <typename A>
struct is_same<A, A> : true_type {};
/// Helper for std::is_base_of
template <typename BaseT, typename DerivedT>
struct is_base_of_helper {
typedef char (&yes)[1];
typedef char (&no)[2];
template <typename B, typename D>
struct dummy {
CUTLASS_HOST_DEVICE operator B*() const;
CUTLASS_HOST_DEVICE operator D*();
};
template <typename T>
CUTLASS_HOST_DEVICE static yes check(DerivedT*, T);
CUTLASS_HOST_DEVICE static no check(BaseT*, int);
static const bool value = sizeof(check(dummy<BaseT, DerivedT>(), int())) == sizeof(yes);
};
/// std::is_base_of
template <typename BaseT, typename DerivedT>
struct is_base_of
: integral_constant<bool,
(is_base_of_helper<typename remove_cv<BaseT>::type,
typename remove_cv<DerivedT>::type>::value) ||
(is_same<typename remove_cv<BaseT>::type,
typename remove_cv<DerivedT>::type>::value)> {};
#else
using std::is_same;
using std::is_base_of;
#endif
//-----------------------------------------------------------------------------
// Type properties <type_traits>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500))
/// std::is_volatile
template <typename T>
struct is_volatile : false_type {};
template <typename T>
struct is_volatile<volatile T> : true_type {};
/// Helper for std::is_pointer (false specialization)
template <typename T>
struct is_pointer_helper : false_type {};
/// Helper for std::is_pointer (true specialization)
template <typename T>
struct is_pointer_helper<T*> : true_type {};
/// std::is_pointer
template <typename T>
struct is_pointer : is_pointer_helper<typename remove_cv<T>::type> {};
/// std::is_void
template <typename T>
struct is_void : is_same<void, typename remove_cv<T>::type> {};
/// std::is_integral
template <typename T>
struct is_integral : false_type {};
template <>
struct is_integral<char> : true_type {};
template <>
struct is_integral<signed char> : true_type {};
template <>
struct is_integral<unsigned char> : true_type {};
template <>
struct is_integral<short> : true_type {};
template <>
struct is_integral<unsigned short> : true_type {};
template <>
struct is_integral<int> : true_type {};
template <>
struct is_integral<unsigned int> : true_type {};
template <>
struct is_integral<long> : true_type {};
template <>
struct is_integral<unsigned long> : true_type {};
template <>
struct is_integral<long long> : true_type {};
template <>
struct is_integral<unsigned long long> : true_type {};
template <typename T>
struct is_integral<volatile T> : is_integral<T> {};
template <typename T>
struct is_integral<const T> : is_integral<T> {};
template <typename T>
struct is_integral<const volatile T> : is_integral<T> {};
/// std::is_floating_point
template <typename T>
struct is_floating_point
: integral_constant<bool,
(is_same<float, typename remove_cv<T>::type>::value ||
is_same<double, typename remove_cv<T>::type>::value)> {};
/// std::is_arithmetic
template <typename T>
struct is_arithmetic
: integral_constant<bool, (is_integral<T>::value || is_floating_point<T>::value)> {};
/// std::is_fundamental
template <typename T>
struct is_fundamental
: integral_constant<bool,
(is_arithmetic<T>::value || is_void<T>::value ||
is_same<nullptr_t, typename remove_cv<T>::type>::value)> {};
#else
using std::is_volatile;
using std::is_pointer;
using std::is_void;
using std::is_integral;
using std::is_floating_point;
using std::is_arithmetic;
using std::is_fundamental;
#endif
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1800)) || \
(defined(__GNUG__) && (__GNUC__ < 5))
/**
* std::is_trivially_copyable
*
* This implementation only evaluates true if T is fundamental or pointer
*
* Without help from partial template specializations provided by the user for
* a specific class or struct, this trait will never report that the specified
 * class or struct is trivially-copyable; this is always safe,
* if possibly sub-optimal.
*/
template <typename T>
struct is_trivially_copyable
: integral_constant<bool, (is_fundamental<T>::value || is_pointer<T>::value)> {};
#else
using std::is_trivially_copyable;
#endif
#if (201703L <=__cplusplus)
/// std::is_integral_v
using CUTLASS_STL_NAMESPACE::is_integral_v;
/// std::is_unsigned_v
using CUTLASS_STL_NAMESPACE::is_unsigned_v;
#endif
//-----------------------------------------------------------------------------
// bit_cast <bit>
//-----------------------------------------------------------------------------
template< class To, class From >
constexpr To CUTLASS_HOST_DEVICE bit_cast(const From& from ) noexcept;
template <class To, class From>
constexpr To CUTLASS_HOST_DEVICE bit_cast(const From& src) noexcept
{
static_assert(sizeof(To) == sizeof(From), "sizes must match");
return reinterpret_cast<To const &>(src);
}
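// Illustrative usage (assumption): reinterpret the bits of a float as a same-sized unsigned
// integer; numeric_limits<float>::infinity() further below uses the same helper in the other
// direction.
//
//   uint32_t bits = cutlass::platform::bit_cast<uint32_t>(1.0f);   // 0x3f800000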
//-----------------------------------------------------------------------------
// Alignment and layout utilities
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500))
/// std::alignment_of
template <typename value_t>
struct alignment_of {
struct pad {
value_t val;
char byte;
};
enum { value = sizeof(pad) - sizeof(value_t) };
};
#else
template <typename value_t>
struct alignment_of : std::alignment_of<value_t> {};
#endif
/* 16B specializations where 32-bit Win32 host compiler disagrees with device compiler */
template <>
struct alignment_of<int4> {
enum { value = 16 };
};
template <>
struct alignment_of<uint4> {
enum { value = 16 };
};
template <>
struct alignment_of<float4> {
enum { value = 16 };
};
template <>
struct alignment_of<long4> {
enum { value = 16 };
};
template <>
struct alignment_of<ulong4> {
enum { value = 16 };
};
template <>
struct alignment_of<longlong2> {
enum { value = 16 };
};
template <>
struct alignment_of<ulonglong2> {
enum { value = 16 };
};
template <>
struct alignment_of<double2> {
enum { value = 16 };
};
template <>
struct alignment_of<longlong4> {
enum { value = 16 };
};
template <>
struct alignment_of<ulonglong4> {
enum { value = 16 };
};
template <>
struct alignment_of<double4> {
enum { value = 16 };
};
// Specializations for volatile/const qualified types
template <typename value_t>
struct alignment_of<volatile value_t> : alignment_of<value_t> {};
template <typename value_t>
struct alignment_of<const value_t> : alignment_of<value_t> {};
template <typename value_t>
struct alignment_of<const volatile value_t> : alignment_of<value_t> {};
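// Illustrative checks (assumption): the specializations above pin the CUDA built-in vector types
// to 16B alignment even where a 32-bit Win32 host compiler would report a smaller value.
//
//   static_assert(cutlass::platform::alignment_of<double2>::value == 16, "");
//   static_assert(cutlass::platform::alignment_of<double2 const>::value == 16, "");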
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1800))
template <size_t Align>
struct aligned_chunk;
template <>
struct __align__(1) aligned_chunk<1> {
uint8_t buff;
};
template <>
struct __align__(2) aligned_chunk<2> {
uint16_t buff;
};
template <>
struct __align__(4) aligned_chunk<4> {
uint32_t buff;
};
template <>
struct __align__(8) aligned_chunk<8> {
uint32_t buff[2];
};
template <>
struct __align__(16) aligned_chunk<16> {
uint32_t buff[4];
};
template <>
struct __align__(32) aligned_chunk<32> {
uint32_t buff[8];
};
template <>
struct __align__(64) aligned_chunk<64> {
uint32_t buff[16];
};
template <>
struct __align__(128) aligned_chunk<128> {
uint32_t buff[32];
};
template <>
struct __align__(256) aligned_chunk<256> {
uint32_t buff[64];
};
template <>
struct __align__(512) aligned_chunk<512> {
uint32_t buff[128];
};
template <>
struct __align__(1024) aligned_chunk<1024> {
uint32_t buff[256];
};
template <>
struct __align__(2048) aligned_chunk<2048> {
uint32_t buff[512];
};
template <>
struct __align__(4096) aligned_chunk<4096> {
uint32_t buff[1024];
};
/// std::aligned_storage
template <size_t Len, size_t Align>
struct aligned_storage {
typedef aligned_chunk<Align> type[Len / sizeof(aligned_chunk<Align>)];
};
#else
using std::aligned_storage;
#endif
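// Illustrative usage (assumption): an aligned backing store, e.g. a 1 KB staging buffer that must
// be 16B-aligned. `buffer` and `workspace` are hypothetical names.
//
//   typename cutlass::platform::aligned_storage<1024, 16>::type buffer;
//   float *workspace = reinterpret_cast<float *>(&buffer);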
#if !defined(__CUDACC_RTC__)
/// Default deleter
template <typename T>
struct default_delete {
void operator()(T* ptr) const { delete ptr; }
};
/// Partial specialization for deleting array types
template <typename T>
struct default_delete<T[]> {
void operator()(T* ptr) const { delete[] ptr; }
};
/// std::unique_ptr
template <class T, class Deleter = default_delete<T> >
class unique_ptr {
public:
typedef T* pointer;
typedef T element_type;
typedef Deleter deleter_type;
private:
/// Pointer to memory
pointer _ptr;
/// Deleter
deleter_type _deleter;
public:
unique_ptr() : _ptr(nullptr) {}
unique_ptr(pointer p) : _ptr(p) {}
~unique_ptr() {
if (_ptr) {
_deleter(_ptr);
}
}
/// Returns a pointer to the managed object or nullptr if no object is owned.
pointer get() const noexcept { return _ptr; }
/// Releases ownership of the managed object, if any
pointer release() noexcept {
pointer p(_ptr);
_ptr = nullptr;
return p;
}
/// Replaces the managed object, deleting the old object.
void reset(pointer p = pointer()) noexcept {
pointer old_ptr = _ptr;
_ptr = p;
if (old_ptr != nullptr) {
get_deleter()(old_ptr);
}
}
/// Swaps the managed objects with *this and another unique_ptr
void swap(unique_ptr& other) noexcept { std::swap(_ptr, other._ptr); }
/// Returns the deleter object
Deleter& get_deleter() noexcept { return _deleter; }
/// Returns the deleter object
Deleter const& get_deleter() const noexcept { return _deleter; }
/// Checks whether an object is owned
operator bool() const noexcept { return _ptr != nullptr; }
/// Dereferences the unique_ptr
T& operator*() const { return *_ptr; }
/// Returns a pointer to the managed object
pointer operator->() const noexcept { return _ptr; }
/// Array access to managed object
T& operator[](size_t i) const { return _ptr[i]; }
};
/// Specializes the swap algorithm
template <typename T, typename Deleter>
void swap(unique_ptr<T, Deleter>& lhs, unique_ptr<T, Deleter>& rhs) noexcept {
lhs.swap(rhs);
}
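// Illustrative usage (assumption, host-only): a minimal single-object holder mirroring
// std::unique_ptr. Note that this implementation has no array partial specialization, so it is
// used with single objects.
//
//   cutlass::platform::unique_ptr<int> value(new int(42));
//   int observed = *value;    // dereference the managed object
//   int *raw = value.get();   // non-owning observation
//   value.reset();            // deletes the old object via default_delete<int>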
#endif
/// std::numeric_limits
template <class T>
struct numeric_limits;
template <>
struct numeric_limits<int32_t> {
CUTLASS_HOST_DEVICE
static constexpr int32_t lowest() noexcept { return -2147483647 - 1;}
CUTLASS_HOST_DEVICE
static constexpr int32_t max() noexcept { return 2147483647;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<int16_t> {
CUTLASS_HOST_DEVICE
static constexpr int16_t lowest() noexcept { return -32768;}
CUTLASS_HOST_DEVICE
static constexpr int16_t max() noexcept { return 32767;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<int8_t> {
CUTLASS_HOST_DEVICE
static constexpr int8_t lowest() noexcept { return -128;}
CUTLASS_HOST_DEVICE
static constexpr int8_t max() noexcept { return 127;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<uint32_t> {
CUTLASS_HOST_DEVICE
static constexpr uint32_t lowest() noexcept { return 0;}
CUTLASS_HOST_DEVICE
static constexpr uint32_t max() noexcept { return 4294967295U;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<uint16_t> {
CUTLASS_HOST_DEVICE
static constexpr uint16_t lowest() noexcept { return 0;}
CUTLASS_HOST_DEVICE
static constexpr uint16_t max() noexcept { return 65535U;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<uint8_t> {
CUTLASS_HOST_DEVICE
static constexpr uint8_t lowest() noexcept { return 0;}
CUTLASS_HOST_DEVICE
static constexpr uint8_t max() noexcept { return 255U;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<float> {
CUTLASS_HOST_DEVICE
static constexpr float infinity() noexcept { return bit_cast<float, int32_t>(0x7f800000);}
static constexpr bool is_integer = false;
static constexpr bool has_infinity = true;
};
/// std::float_round_style
using CUTLASS_STL_NAMESPACE::float_round_style;
using CUTLASS_STL_NAMESPACE::round_indeterminate;
using CUTLASS_STL_NAMESPACE::round_toward_zero;
using CUTLASS_STL_NAMESPACE::round_to_nearest;
using CUTLASS_STL_NAMESPACE::round_toward_infinity;
using CUTLASS_STL_NAMESPACE::round_toward_neg_infinity;
/// std::float_denorm_style
using CUTLASS_STL_NAMESPACE::float_denorm_style;
using CUTLASS_STL_NAMESPACE::denorm_indeterminate;
using CUTLASS_STL_NAMESPACE::denorm_absent;
using CUTLASS_STL_NAMESPACE::denorm_present;
} // namespace platform
} // namespace cutlass
| include/cutlass/platform/platform.h/0 | {
"file_path": "include/cutlass/platform/platform.h",
"repo_id": "include",
"token_count": 9707
} | 41 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implementation of a CTA-wide semaphore for inter-CTA synchronization.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// CTA-wide semaphore for inter-CTA synchronization.
class Semaphore {
public:
int *lock;
bool wait_thread;
int state;
public:
/// Implements a semaphore to wait for a flag to reach a given value
CUTLASS_HOST_DEVICE
Semaphore(int *lock_, int thread_id):
lock(lock_),
wait_thread(thread_id < 0 || thread_id == 0),
state(-1) {
}
/// Permit fetching the synchronization mechanism early
CUTLASS_DEVICE
void fetch() {
if (wait_thread) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
asm volatile ("ld.global.acquire.gpu.b32 %0, [%1];\n" : "=r"(state) : "l"(lock));
#else
asm volatile ("ld.global.cg.b32 %0, [%1];\n" : "=r"(state) : "l"(lock));
#endif
}
}
/// Gets the internal state
CUTLASS_DEVICE
int get_state() const {
return state;
}
/// Waits until the semaphore is equal to the given value
CUTLASS_DEVICE
void wait(int status = 0) {
while( __syncthreads_and(state != status) ) {
fetch();
}
__syncthreads();
}
/// Updates the lock with the given result
CUTLASS_DEVICE
void release(int status = 0) {
__syncthreads();
if (wait_thread) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
asm volatile ("st.global.release.gpu.b32 [%0], %1;\n" : : "l"(lock), "r"(status));
#else
asm volatile ("st.global.cg.b32 [%0], %1;\n" : : "l"(lock), "r"(status));
#endif
}
}
};
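// Illustrative usage sketch (assumption): the serial split-K pattern this class is designed for.
// Each CTA that contributes to the same output tile waits for its turn, accumulates, and then
// hands the lock to the next k-slice. `lock_ptr`, `tile_idx`, `k_slice` and `k_slices` are
// hypothetical names, not part of this header.
//
//   CUTLASS_DEVICE
//   void split_k_turn(int *lock_ptr, int tile_idx, int k_slice, int k_slices) {
//     cutlass::Semaphore semaphore(lock_ptr + tile_idx, threadIdx.x);
//     semaphore.fetch();          // start the acquire early (mainloop work overlaps here in real kernels)
//     semaphore.wait(k_slice);    // block until the previous slice released this tile
//     // ... accumulate this CTA's partial results into the output tile ...
//     int next = (k_slice + 1 == k_slices) ? 0 : k_slice + 1;
//     semaphore.release(next);    // pass the tile to the next slice (or reset to 0)
//   }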
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/semaphore.h/0 | {
"file_path": "include/cutlass/semaphore.h",
"repo_id": "include",
"token_count": 1221
} | 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Ell iterator for Blocked-Ell matrix (ellValue matrix) used with EllMmaPipelined
*/
#pragma once
#include "cutlass/arch/memory.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h"
#include "cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h"
#include "cutlass/transform/threadblock/ell_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// EllPredicatedTileIterator
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize register liveness
/// and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is constructed.
/// Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator is constructed.
/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be partially full in
/// both the advance dimension and the steady-state dimension. This is assumed to be the last
/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to
/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent
/// accesses may be performed without updating internal predicates and are efficient in terms of
/// live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once
/// outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing
/// the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
// typename Iterator::Params params,
// typename Iterator::Element *ptr,
// TensorCoord extent) {
//
// typename Iterator::Fragment fragment;
//
// TensorCoord threadblock_offset(0, 0);
//
//      Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//
// fragment = *iter; // load "residue" tile first
// ++iter; // advance to first "steady state" tile and update internal masks
//
//
// #pragma unroll
// for (int i = Remaining - 1; i >= 0; --i) {
//
// f(fragment);
//
// if (!i) {
// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs.
// }
//
// fragment = *iter; // load tile during "steady state" phase
// ++iter; // advance to next tile - lightweight due to steady-state masks
// }
// }
//
// void host(TensorView<Element, 2, layout::PitchLinear> view) {
//
// using Iterator = transform::threadblock::EllPredicatedTileIterator;
//
// typename Iterator::Params params(view.layout());
//
//      kernel<Iterator>(params, view.data(), view.extent());
// }
///
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
int AccessSize = ThreadMap::kElementsPerAccess
>
class EllPredicatedTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize>
class EllPredicatedTileIterator<Shape_, Element_, layout::PitchLinear, AdvanceRank,
ThreadMap_, AccessSize> {
public:
static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
using AccessType = AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
/// Underlying iterator to compute the addresses
using TileAccessIterator =
EllPredicatedTileAccessIterator<Shape, Element, Layout, kAdvanceRank,
ThreadMap, AccessType>;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend EllPredicatedTileIterator;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) { }
CUTLASS_HOST_DEVICE
Params() { }
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset) {}
/// Construct a EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset({0, 1});
else
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return address_iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) { address_iterator_.ell_add_mask(blocksize); }
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, address_iterator_.valid());
++address_iterator_;
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_byte_offset(frag, 0); }
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator &ell_iter) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
LongIndex ell_offset = 0;
int k_offset = address_iterator_.get_k();
ell_offset = ell_iter.get_offset(k_offset) * sizeof(Element);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + ell_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
bool is_valid = address_iterator_.valid();
is_valid = is_valid && (ell_offset >= 0);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, is_valid);
++address_iterator_;
}
}
}
}
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator &ell_iter) {
LongIndex ell_offset = ell_iter.get_offset_fast() * sizeof(Element);
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + ell_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
bool is_valid = address_iterator_.valid();
is_valid = is_valid && (ell_offset >= 0);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, is_valid);
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
if (address_iterator_.valid()) {
*access_ptr = frag_ptr[idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_byte_offset(frag, 0); }
};
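// Illustrative sketch (assumption): loading a fragment through the Blocked-Ell indirection,
// assuming `Iterator` is an instantiation of this EllPredicatedTileIterator. The EllIterator maps
// the iterator's current K coordinate to an element offset into the Blocked-Ell value storage;
// the "fast" path reuses a single offset for the entire tile. The names `iter`, `ell_iter` and
// `is_aligned_block` are hypothetical.
//
//   typename Iterator::Fragment frag;
//   frag.clear();
//   if (is_aligned_block) {
//     iter.load_with_ell_index_fast(frag, ell_iter);   // one shared offset per tile
//   } else {
//     iter.load_with_ell_index(frag, ell_iter);        // per-access offset lookup
//   }
//   ++iter;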
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int AccessSize
>
class EllPredicatedTileIterator<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessSize> {
public:
  static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                "Specialization for pitch-linear iterator may advance along the "
                "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {
}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset ///< Initial offset of threadblock
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())
) { }
/// Construct a EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): EllPredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index(frag, ell_iter);
}
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index_fast(frag, ell_iter);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
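// Illustrative usage sketch (comments only, not part of the library). The tile
// shape, thread map, element type, access size, and the pointer/extent names
// below are assumptions chosen purely for exposition; real kernels derive them
// from the threadblock tile and architecture configuration.
//
//   using Shape     = cutlass::MatrixShape<64, 32>;
//   using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
//       cutlass::layout::PitchLinearShape<64, 32>, 128, 4>;
//   using Iterator  = cutlass::transform::threadblock::EllPredicatedTileIterator<
//       Shape, float, cutlass::layout::ColumnMajor, /*AdvanceRank=*/1, ThreadMap,
//       /*AccessSize=*/4>;
//
//   // Host side: precompute parameters from the tensor's layout (leading dimension).
//   typename Iterator::Params params(cutlass::layout::ColumnMajor(ldm));
//
//   // Device side: construct with a zero threadblock offset, load one tile,
//   // then advance to the next tile.
//   Iterator iter(params, ptr, extent, threadIdx.x);
//   typename Iterator::Fragment frag;
//   iter.load(frag);
//   ++iter;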
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int AccessSize
>
class EllPredicatedTileIterator<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {
    }
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset ///< Initial offset of threadblock
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())
) { }
  /// Construct an EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): EllPredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index(frag, ell_iter);
}
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index_fast(frag, ell_iter);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
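// Reading aid for the row-major specialization above (it restates the mapping
// in the constructor; the extent values are hypothetical): (row, column)
// coordinates are reinterpreted in pitch-linear (contiguous, strided) order
// before delegating to the underlying pitch-linear iterator. For example, with
// an extent of (rows, columns) = (128, 64):
//
//   extent             -> PitchLinearCoord(extent.column(), extent.row()) = (64, 128)
//   threadblock_offset -> PitchLinearCoord(offset.column(), offset.row())
//
// and AdvanceRank == 0 (rows) maps to the strided rank of the underlying
// iterator, while AdvanceRank == 1 (columns) maps to its contiguous rank.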
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for interleaved data. It is mapped
/// to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize, int InterleavedK>
class EllPredicatedTileIterator<Shape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessSize>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
  /// Construct an EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index(frag, ell_iter);
}
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index_fast(frag, ell_iter);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
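// Worked example of the interleaved coordinate mapping above (the values are
// hypothetical, chosen only to trace the arithmetic): with InterleavedK = 32
// and a logical extent of (rows, columns) = (128, 64), the underlying
// pitch-linear iterator is constructed with
//
//   contiguous extent = extent.row()    * kInterleavedK = 128 * 32 = 4096
//   strided extent    = extent.column() / kInterleavedK =  64 / 32 =    2
//
// i.e. each group of kInterleavedK columns is folded into the contiguous
// dimension, and the threadblock offset is transformed the same way.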
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for interleaved-32 data. It is
/// mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize, int InterleavedK>
class EllPredicatedTileIterator<Shape_, Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessSize>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
  /// Construct an EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing computing the addresses of storing of tiles
from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
    /// This iterator is specialized for an access size that is 64 bits in
/// length.
static int const kAccessSizeInBits = 64;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 64b");
///< Number of pointers
static int const kPointerCount = 1;
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess);
}
};
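// Reading aid for get() and the offset bookkeeping above (restates the code,
// introduces no new behavior): stride_ is kept in units of AccessType
// (ref.stride(0) / Layout::kElementsPerAccess), so the access offset
//
//   iteration_strided_    * ThreadMap::Delta::kStrided * stride_
//     + iteration_contiguous_ * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess
//
// is pointer arithmetic on AccessType, while byte_offset_ is applied through a
// char* so that add_pointer_offset() can accumulate offsets in units of Element.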
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous64b,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous64b,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
    /// This iterator is specialized for an access size that is 64 bits in
/// length.
static int const kAccessSizeInBits = 64;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 64b");
///< Number of pointers - two pointers are needed if making more than 4 iterations along
///< strided dimension
static int const kPointerCount = (ThreadMap::Iterations::kStrided > 4 ? 2 : 1);
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_[Detail::kPointerCount];
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / ThreadMap::kElementsPerAccess) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data());
byte_offset_[0] = ref.offset(thread_offset_in_threadblock_tile) * sizeof(Element);
if (Detail::kPointerCount == 2) {
byte_offset_[1] = byte_offset_[0] ^ 8;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset / ThreadMap::kElementsPerAccess;
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
// Map the logical contiguous and strided access to the internal swizzled structure.
int uniform_offset = (iteration_strided_ & 0x3) * stride_ + (iteration_strided_ >> 3) * 16 + stride_ * ThreadMap::Delta::kContiguous * iteration_contiguous_;
char *access_byte_ptr = reinterpret_cast<char *>(pointer_ + uniform_offset);
int byte_offset;
    // When two pointers are in use (more than four iterations along the strided
    // dimension), strided iterations 4..7 select the second byte offset.
if (Detail::kPointerCount == 2 && (iteration_strided_ & 0x4)) {
byte_offset = byte_offset_[1];
}
else {
byte_offset = byte_offset_[0];
}
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.strided() * Shape::kStrided + coord.contiguous() * Shape::kContiguous * stride_);
}
};
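// Reading aid for the two-pointer scheme above (restates the code, introduces
// no new behavior): byte_offset_[0] holds the thread's base offset in bytes;
// when a second pointer is required (more than four strided iterations),
// byte_offset_[1] is the same offset with bit 3 flipped (XOR with 8 bytes,
// i.e. one 64-bit access). get() selects byte_offset_[1] for strided
// iterations 4..7 via (iteration_strided_ & 0x4) instead of recomputing a
// fresh offset per access.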
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicand64bCrosswise,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicand64bCrosswise,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128b");
///< Number of pointers
static int const kPointerCount = 1;
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous128b,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous128b,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128b");
///< Number of pointers
static int const kPointerCount = 1;
};
  static_assert(!(ThreadMap::Iterations::kStrided % 2),
                "This iterator requires an even number of iterations along the strided dimension");
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int offset_c = (iteration_contiguous_ * ThreadMap::Delta::kContiguous + (iteration_strided_ & 1) * 2);
int offset_s = (iteration_strided_ / 2) * 8;
int access_offset = offset_c * stride_ + offset_s;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous * stride_ +
coord.strided() * Shape::kStrided * Layout::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCrosswise128x4,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCrosswise128x4,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
# CuTe Layouts
This document describes `Layout`, CuTe's core abstraction.
Fundamentally, a `Layout` maps from coordinate space(s)
to an index space.
`Layout`s present a common interface to multidimensional array access
that abstracts away the details of how the array's elements are organized in memory.
This lets users write algorithms that access multidimensional arrays generically,
so that layouts can change, without users' code needing to change. For example, a row-major MxN layout and a column-major MxN layout can be treated identically in software.
CuTe also provides an "algebra of `Layout`s."
`Layout`s can be combined and manipulated
to construct more complicated layouts
and to tile layouts across other layouts.
This can help users do things like partition layouts of data over layouts of threads.
## Fundamental Types and Concepts
### Integers
CuTe makes great use of dynamic (known only at run-time) and static (known at compile-time) integers.
* Dynamic integers (or "run-time integers") are just ordinary integral types like `int` or `size_t` or `uint16_t`. Anything that is accepted by `std::is_integral<T>` is considered a dynamic integer in CuTe.
* Static integers (or "compile-time integers") are instantiations of types like `std::integral_constant<Value>`. These types encode the value as a `static constexpr` member. They also support casting to their underlying dynamic types, so they can be used in expressions with dynamic integers. CuTe defines its own CUDA-compatible static integer types `cute::C<Value>` along with overloaded math operators so that math on static integers results in static integers. CuTe defines shortcut aliases `Int<1>`, `Int<2>`, `Int<3>` and `_1`, `_2`, `_3` as conveniences, which you should see often within examples.
CuTe attempts to handle static and dynamic integers identically. In the examples that follow, all dynamic integers could be replaced with static integers and vice versa. When we say "integer" in CuTe, we almost always mean a static OR dynamic integer.
CuTe provides a number of traits to work with integers.
* `cute::is_integral<T>`: Checks whether `T` is a static or dynamic integer type.
* `cute::is_std_integral<T>`: Checks whether `T` is a dynamic integer type. Equivalent to `std::is_integral<T>`.
* `cute::is_static<T>`: Checks whether `T` is an empty type (so instantiations cannot depend on any dynamic information). Equivalent to `std::is_empty`.
* `cute::is_constant<N,T>`: Checks that `T` is a static integer AND its value is equivalent to `N`.
See the [`integral_constant` implementations](../../../include/cute/numeric/integral_constant.hpp) for more information.
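As a quick illustration, here is a minimal sketch of mixing static and dynamic integers (assuming the CuTe umbrella header `cute/tensor.hpp` is included and the names are brought in from `namespace cute`, as in the examples throughout this document):
```c++
int  d = 2;           // dynamic integer: the value 2 is known only at run time
auto s = Int<3>{};    // static integer: the value 3 is encoded in the type cute::C<3>

auto p = s * d;                  // static * dynamic yields a dynamic integer with value 6
auto q = Int<4>{} * Int<5>{};    // static * static stays static: cute::C<20>

static_assert(is_static<decltype(s)>::value, "Int<3> carries no run-time state");
static_assert(is_constant<20, decltype(q)>::value, "static math produces static results");
```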
### Tuple
A tuple is a finite ordered list of zero or more elements.
The [`cute::tuple` class](../../../include/cute/container/tuple.hpp) behaves like `std::tuple`, but works on device and host. It imposes restrictions on its template arguments and strips down the implementation for performance and simplicity.
### IntTuple
CuTe defines the IntTuple concept as either an integer, or a tuple of IntTuples. Note the recursive definition.
In C++, we define [operations on `IntTuple`](../../../include/cute/int_tuple.hpp).
Examples of `IntTuple`s include:
* `int{2}`, the dynamic integer 2.
* `Int<3>{}`, the static integer 3.
* `make_tuple(int{2}, Int<3>{})`, the tuple of dynamic-2, and static-3.
* `make_tuple(uint16_t{42}, make_tuple(Int<1>{}, int32_t{3}), Int<17>{})`, the tuple of dynamic-42, tuple of static-1 and dynamic-3, and static-17.
CuTe reuses the `IntTuple` concept for many different things,
including Shape, Stride, Step, and Coord
(see [`include/cute/layout.hpp`](../../../include/cute/layout.hpp)).
Operations defined on `IntTuple`s include the following.
* `rank(IntTuple)`: The number of elements in an `IntTuple`. A single integer has rank 1, and a tuple has rank `tuple_size`.
* `get<I>(IntTuple)`: The `I`th element of the `IntTuple`, with `I < rank`. For single integers, `get<0>` is just that integer.
* `depth(IntTuple)`: The number of hierarchical `IntTuple`s. A single integer has depth 0, a tuple of integers has depth 1, a tuple that contains a tuple of integers has depth 2, etc.
* `size(IntTuple)`: The product of all elements of the `IntTuple`.
We write `IntTuple`s with parentheses to denote the hierarchy. For example, `6`, `(2)`, `(4,3)`, and `(3,(6,2),8)` are all `IntTuple`s.
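A small sketch of these operations (the values in the comments follow from the definitions above):
```c++
auto t = make_tuple(int{2}, make_tuple(Int<1>{}, int{3}), Int<17>{});  // (2,(_1,3),_17)

print(rank(t));     // _3  : three top-level elements
print(get<1>(t));   // (_1,3)
print(depth(t));    // _2  : a tuple that contains a tuple of integers
print(size(t));     // 102 : 2 * 1 * 3 * 17 (dynamic, since some leaves are dynamic)
```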
### Shapes and Strides
Both `Shape` and `Stride` are `IntTuple` concepts.
### Layout
A `Layout` is a tuple of (`Shape`, `Stride`).
Semantically, it implements a mapping from
any coordinate within the Shape to an index via the Stride.
### Tensor
A `Layout` can be composed with data -- e.g., a pointer or an array -- to create a `Tensor`. The index generated by the `Layout` is used to subscript an iterator to retrieve the appropriate data. For details on `Tensor`, please refer to the
[`Tensor` section of the tutorial](./03_tensor.md).
## Layout Creation and Use
A `Layout` is a pair of `IntTuple`s: the `Shape` and the `Stride`. The first element defines the abstract *shape* of the `Layout`, and the second element defines the *strides*, which map from coordinates within the shape to the index space.
We define many operations on `Layout`s analogous to those defined on `IntTuple`.
* `rank(Layout)`: The number of modes in a `Layout`. Equivalent to the tuple size of the `Layout`'s shape.
* `get<I>(Layout)`: The `I`th sub-layout of the `Layout`, with `I < rank`.
* `depth(Layout)`: The depth of the `Layout`'s shape. A single integer has depth 0, a tuple of integers has depth 1, a tuple of tuples of integers has depth 2, etc.
* `shape(Layout)`: The shape of the `Layout`.
* `stride(Layout)`: The stride of the `Layout`.
* `size(Layout)`: The size of the `Layout` function's domain. Equivalent to `size(shape(Layout))`.
* `cosize(Layout)`: The size of the `Layout` function's codomain (not necessarily the range). Equivalent to `A(size(A) - 1) + 1`.
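For example, applying these operations to a small hand-written layout (a sketch; the comments show the expected results):
```c++
Layout a = make_layout(make_shape (Int<2>{}, make_shape (Int<3>{}, Int<4>{})),
                       make_stride(Int<12>{}, make_stride(Int<4>{}, Int<1>{})));
// a : (_2,(_3,_4)):(_12,(_4,_1))

print(rank(a));     // _2
print(depth(a));    // _2
print(shape(a));    // (_2,(_3,_4))
print(stride(a));   // (_12,(_4,_1))
print(size(a));     // _24
print(cosize(a));   // _24  == a(size(a) - 1) + 1
```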
### Hierarchical access functions
`IntTuple`s and `Layout`s can be arbitrarily nested.
For convenience, we define versions of some of the above functions
that take a sequence of integers, instead of just one integer.
This makes it possible to access elements
inside of nested `IntTuple` or `Layout` more easily.
For example, we permit `get<I...>(x)`, where `I...` is a "C++ parameter pack" that denotes zero or more (integer) template arguments. These hierarchical access functions include the following.
* `get<I0,I1,...,IN>(x) := get<IN>(...(get<I1>(get<I0>(x)))...)`. Extract the `IN`th of the ... of the `I1`st of the `I0`th element of `x`.
* `rank<I...>(x) := rank(get<I...>(x))`. The rank of the `I...`th element of `x`.
* `depth<I...>(x) := depth(get<I...>(x))`. The depth of the `I...`th element of `x`.
* `shape<I...>(x) := shape(get<I...>(x))`. The shape of the `I...`th element of `x`.
* `size<I...>(x) := size(get<I...>(x))`. The size of the `I...`th element of `x`.
In the following examples, you'll see use of `size<0>` and `size<1>` to determine loop bounds for the 0th and 1st mode of a layout or tensor.
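For instance, a sketch of hierarchical access into a nested shape (comments show the expected output):
```c++
auto shape = make_shape(Int<2>{}, make_shape(Int<3>{}, Int<4>{}));   // (_2,(_3,_4))

print(get<1>(shape));     // (_3,_4)
print(get<1,0>(shape));   // _3
print(rank<1>(shape));    // _2
print(size<1>(shape));    // _12
```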
### Constructing a Layout
A `Layout` can be constructed in many different ways.
It can include any combination of compile-time (static) integers
or run-time (dynamic) integers.
```c++
Layout s8 = make_layout(Int<8>{});
Layout d8 = make_layout(8);
Layout s2xs4 = make_layout(make_shape(Int<2>{},Int<4>{}));
Layout s2xd4 = make_layout(make_shape(Int<2>{},4));
Layout s2xd4_a = make_layout(make_shape (Int< 2>{},4),
make_stride(Int<12>{},Int<1>{}));
Layout s2xd4_col = make_layout(make_shape(Int<2>{},4),
LayoutLeft{});
Layout s2xd4_row = make_layout(make_shape(Int<2>{},4),
LayoutRight{});
Layout s2xh4 = make_layout(make_shape (2,make_shape (2,2)),
make_stride(4,make_stride(2,1)));
Layout s2xh4_col = make_layout(shape(s2xh4),
LayoutLeft{});
```
The `make_layout` function returns a `Layout`.
It deduces the types of the function's arguments and returns a `Layout` with the appropriate template arguments.
Similarly, the `make_shape` and `make_stride` functions
return a `Shape` and a `Stride`, respectively.
CuTe often uses these `make_*` functions
due to restrictions around constructor template argument deduction (CTAD) and to avoid having to repeat static or dynamic integer types.
When the `Stride` argument is omitted, it is generated from the provided `Shape` with `LayoutLeft` as default. The `LayoutLeft` tag constructs strides as an exclusive prefix product of the `Shape` from left to right, without regard to the `Shape`'s hierarchy. This can be considered a "generalized column-major stride generation". The `LayoutRight` tag constructs strides as an exclusive prefix product of the `Shape` from right to left, without regard to the `Shape`'s hierarchy. For shapes of depth one, this can be considered a "row-major stride generation", but for hierarchical shapes the resulting strides may be surprising. For example, the strides of `s2xh4` above could be generated with `LayoutRight`.
Calling `print` on each layout above results in the following
```
s8 : _8:_1
d8 : 8:_1
s2xs4 : (_2,_4):(_1,_2)
s2xd4 : (_2,4):(_1,_2)
s2xd4_a : (_2,4):(_12,_1)
s2xd4_col : (_2,4):(_1,_2)
s2xd4_row : (_2,4):(4,_1)
s2xh4 : (2,(2,2)):(4,(2,1))
s2xh4_col : (2,(2,2)):(_1,(2,4))
```
The `Shape:Stride` notation is used quite often for `Layout`. The `_N` notation is shorthand for a static integer while other integers are dynamic integers. Observe that both `Shape` and `Stride` may be composed of both static and dynamic integers.
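As a check on the `LayoutRight` remark above, the strides of `s2xh4` can indeed be generated automatically rather than written by hand (a sketch):
```c++
Layout s2xh4_row = make_layout(shape(s2xh4),
                               LayoutRight{});
// s2xh4_row : (2,(2,2)):(4,(2,1)) -- the same strides that s2xh4 specifies explicitly
```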
Also note that the `Shape` and `Stride` are assumed to be *congruent*. That is, `Shape` and `Stride` have the same tuple profiles. For every integer in `Shape`, there is a corresponding integer in `Stride`. This can be asserted with
```cpp
static_assert(congruent(my_shape, my_stride));
```
### Using a Layout
The fundamental use of a `Layout` is to map between coordinate space(s) defined by the `Shape` and an index space defined by the `Stride`. For example, to print an arbitrary rank-2 layout in a 2-D table, we can write the function
```c++
template <class Shape, class Stride>
void print2D(Layout<Shape,Stride> const& layout)
{
for (int m = 0; m < size<0>(layout); ++m) {
for (int n = 0; n < size<1>(layout); ++n) {
printf("%3d ", layout(m,n));
}
printf("\n");
}
}
```
which produces the following output for the above examples.
```
> print2D(s2xs4)
0 2 4 6
1 3 5 7
> print2D(s2xd4_a)
0 1 2 3
12 13 14 15
> print2D(s2xh4_col)
0 2 4 6
1 3 5 7
> print2D(s2xh4)
0 2 1 3
4 6 5 7
```
We can see static, dynamic, row-major, column-major, and hierarchical layouts printed here. The statement `layout(m,n)` provides the mapping of
the logical 2-D coordinate (m,n) to the 1-D index.
Interestingly, the `s2xh4` example isn't row-major or column-major. Furthermore, it has three modes but is still interpreted as rank-2 and we're using a 2-D coordinate. Specifically, `s2xh4` has a 2-D multi-mode in the second mode, but we're still able to use a 1-D coordinate for that mode. More on this in the next section, but first we can generalize this another step. Let's use a 1-D coordinate and treat all of the modes of each layout as a single multi-mode. For instance, the following `print1D` function
```c++
template <class Shape, class Stride>
void print1D(Layout<Shape,Stride> const& layout)
{
for (int i = 0; i < size(layout); ++i) {
printf("%3d ", layout(i));
}
}
```
produces the following output for the above examples.
```
> print1D(s2xs4)
0 1 2 3 4 5 6 7
> print1D(s2xd4_a)
0 12 1 13 2 14 3 15
> print1D(s2xh4_col)
0 1 2 3 4 5 6 7
> print1D(s2xh4)
0 4 2 6 1 5 3 7
```
Any multi-mode of a layout, including the entire layout itself, can accept a 1-D coordinate. More on this in the following sections.
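For example, the following coordinates into `s2xh4` are equivalent and map to the same index (a sketch; compare with the `print1D` output above):
```c++
int i0 = s2xh4(5);                     // 1-D coordinate
int i1 = s2xh4(1,2);                   // 2-D coordinate
int i2 = s2xh4(1,make_coord(0,1));     // natural (h-D) coordinate
// i0 == i1 == i2 == 5
```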
CuTe provides more printing utilities for visualizing Layouts. The `print_layout` function produces a formatted 2-D table of the Layout's mapping.
```text
> print_layout(s2xh4)
(2,(2,2)):(4,(2,1))
0 1 2 3
+---+---+---+---+
0 | 0 | 2 | 1 | 3 |
+---+---+---+---+
1 | 4 | 6 | 5 | 7 |
+---+---+---+---+
```
The `print_latex` function generates LaTeX that can be compiled with `pdflatex` into a color-coded vector graphics image of the same 2-D table.
### Vector Layouts
We define a vector as any `Layout` with `rank == 1`.
For example, the layout `8:1` can be interpreted as an 8-element vector whose indices are contiguous.
```
Layout: 8:1
Coord : 0 1 2 3 4 5 6 7
Index : 0 1 2 3 4 5 6 7
```
Similarly,
the layout `8:2` can be interpreted as an 8-element vector where the indices of the elements are strided by `2`.
```
Layout: 8:2
Coord : 0 1 2 3 4 5 6 7
Index : 0 2 4 6 8 10 12 14
```
By the above rank-1 definition, we *also* interpret layout `((4,2)):((2,1))` as a vector, since its shape is rank-1. The inner shape looks like a 4x2 row-major matrix, but the extra pair of parenthesis suggest we can interpret those two modes as a 1-D 8-element vector. The strides tell us that the first `4` elements are strided by `2` and then there are `2` of those first elements strided by `1`.
```
Layout: ((4,2)):((2,1))
Coord : 0 1 2 3 4 5 6 7
Index : 0 2 4 6 1 3 5 7
```
We can see the second set of `4` elements are duplicates of the first `4` with an extra stride of `1`.
Consider the layout `((4,2)):((1,4))`. Again, it's `4` elements strided by `1` and then `2` of those first elements strided by `4`.
```
Layout: ((4,2)):((1,4))
Coord : 0 1 2 3 4 5 6 7
Index : 0 1 2 3 4 5 6 7
```
As a function from integers to integers, it's identical to `8:1`. It's the identity function.
### Matrix examples
Generalizing, we define a matrix as any `Layout` that is rank-2. For example,
```
Shape : (4,2)
Stride: (1,4)
0 4
1 5
2 6
3 7
```
is a 4x2 column-major layout with stride-1 down the columns and stride-4 across the rows, and
```
Shape : (4,2)
Stride: (2,1)
0 1
2 3
4 5
6 7
```
is a 4x2 row-major layout with stride-2 down the columns and stride-1 across the rows. Majorness is simply which mode has stride-1.
Just like the vector layouts, each of the modes of the matrix can also be split into *multi-modes*.
This lets us express more layouts beyond just row-major and column-major. For example,
```
Shape: ((2,2),2)
Stride: ((4,1),2)
0 2
4 6
1 3
5 7
```
is also logically 4x2, with stride-2 across the rows but a multi-stride down the columns. The first `2` elements down the column have a stride of `4` and then there is a copy of those with stride-1. Since this layout is logically 4x2,
like the column-major and row-major examples above,
we can _still_ use 2-D coordinates to index into it.
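A sketch of constructing and indexing this layout with a 2-D coordinate (the comment shows the expected result, matching the table above):
```c++
Layout mm = make_layout(make_shape (make_shape (2,2), 2),
                        make_stride(make_stride(4,1), 2));
// mm : ((2,2),2):((4,1),2)

int idx = mm(3,1);   // logical (3,1) -> natural ((1,1),1) -> 1*4 + 1*1 + 1*2 = 7
```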
## Layout Concepts
In this section, we'll introduce the coordinate sets that `Layout`s accept and how the coordinate mappings and index mappings are computed.
### Layout compatibility
We say that layout A is *compatible* with layout B if the shape of A is compatible with the shape of B.
Shape A is compatible with shape B if
* the size of A is equal to the size of B and
* all coordinates within A are valid coordinates within B.
For example:
* Shape 24 is NOT compatible with Shape 32.
* Shape 24 is compatible with Shape (4,6).
* Shape (4,6) is compatible with Shape ((2,2),6).
* Shape ((2,2),6) is compatible with Shape ((2,2),(3,2)).
* Shape 24 is compatible with Shape ((2,2),(3,2)).
* Shape 24 is compatible with Shape ((2,3),4).
* Shape ((2,3),4) is NOT compatible with Shape ((2,2),(3,2)).
* Shape ((2,2),(3,2)) is NOT compatible with Shape ((2,3),4).
* Shape 24 is compatible with Shape (24).
* Shape (24) is NOT compatible with Shape 24.
* Shape (24) is NOT compatible with Shape (4,6).
That is, *compatible* is a weak partial order on Shapes as it is reflexive, antisymmetric, and transitive.
### Layouts Coordinates
With the notion of compatibility above, we emphasize that every `Layout` accepts multiple kinds of coordinates. Every `Layout` accepts coordinates for any `Shape` that is compatible with it. CuTe provides mappings between these sets of coordinates via a colexicographical order.
Thus, all Layouts provide two fundamental mappings:
* the map from an input coordinate to the corresponding natural coordinate via the `Shape`, and
* the map from a natural coordinate to the index via the `Stride`.
#### Coordinate Mapping
The map from an input coordinate to a natural coordinate is the application of a colexicographical order (reading right to left, instead of "lexicographical," which reads left to right) within the `Shape`.
Take the shape `(3,(2,3))`, for example. This shape has three coordinate sets: the 1-D coordinates, the 2-D coordinates, and the natural (h-D) coordinates.
| 1-D | 2-D | Natural | | 1-D | 2-D | Natural |
| ----- | ------- | ----------- |-| ----- | ------- | ----------- |
| `0` | `(0,0)` | `(0,(0,0))` | | `9` | `(0,3)` | `(0,(1,1))` |
| `1` | `(1,0)` | `(1,(0,0))` | | `10` | `(1,3)` | `(1,(1,1))` |
| `2` | `(2,0)` | `(2,(0,0))` | | `11` | `(2,3)` | `(2,(1,1))` |
| `3` | `(0,1)` | `(0,(1,0))` | | `12` | `(0,4)` | `(0,(0,2))` |
| `4` | `(1,1)` | `(1,(1,0))` | | `13` | `(1,4)` | `(1,(0,2))` |
| `5` | `(2,1)` | `(2,(1,0))` | | `14` | `(2,4)` | `(2,(0,2))` |
| `6` | `(0,2)` | `(0,(0,1))` | | `15` | `(0,5)` | `(0,(1,2))` |
| `7` | `(1,2)` | `(1,(0,1))` | | `16` | `(1,5)` | `(1,(1,2))` |
| `8` | `(2,2)` | `(2,(0,1))` | | `17` | `(2,5)` | `(2,(1,2))` |
Each coordinate into the shape `(3,(2,3))` has two *equivalent* coordinates and all equivalent coordinates map to the same natural coordinate. To emphasize again, because all of the above coordinates are valid inputs, a Layout with Shape `(3,(2,3))` can be used as if it is a 1-D array of 18 elements by using the 1-D coordinates, a 2-D matrix of 3x6 elements by using the 2-D coordinates, or a h-D tensor of 3x(2x3) elements by using the h-D (natural) coordinates.
The previous 1-D print demonstrates how CuTe identifies 1-D coordinates with a colexicographical ordering of 2-D coordinates. Iterating from `i = 0` to `size(layout)` and indexing into our layout with the single integer coordinate `i`, traverses the 2-D coordinates in this "generalized-column-major" order, even if the layout maps coordinates to indices in a row-major or more complex fashion.
The function `cute::idx2crd(idx, shape)` is responsible for the coordinate mapping. It will take any coordinate within the shape and compute the equivalent natural coordinate for that shape.
```cpp
auto shape = Shape<_3,Shape<_2,_3>>{};
print(idx2crd( 16, shape)); // (1,(1,2))
print(idx2crd(_16{}, shape)); // (_1,(_1,_2))
print(idx2crd(make_coord( 1,5), shape)); // (1,(1,2))
print(idx2crd(make_coord(_1{},5), shape)); // (_1,(1,2))
print(idx2crd(make_coord( 1,make_coord(1, 2)), shape)); // (1,(1,2))
print(idx2crd(make_coord(_1{},make_coord(1,_2{})), shape)); // (_1,(1,_2))
```
#### Index Mapping
The map from a natural coordinate to an index is performed by taking the inner product of the natural coordinate with the `Layout`'s `Stride`.
Take the layout `(3,(2,3)):(3,(12,1))`, for example. Then a natural coordinate `(i,(j,k))` will result in the index `i*3 + j*12 + k*1`. The indices this layout computes are shown in the 2-D table below where `i` is used as the row coordinate and `(j,k)` is used as the column coordinate.
```
0 1 2 3 4 5 <== 1-D col coord
(0,0) (1,0) (0,1) (1,1) (0,2) (1,2) <== 2-D col coord (j,k)
+-----+-----+-----+-----+-----+-----+
0 | 0 | 12 | 1 | 13 | 2 | 14 |
+-----+-----+-----+-----+-----+-----+
1 | 3 | 15 | 4 | 16 | 5 | 17 |
+-----+-----+-----+-----+-----+-----+
2 | 6 | 18 | 7 | 19 | 8 | 20 |
+-----+-----+-----+-----+-----+-----+
```
The function `cute::crd2idx(c, shape, stride)` is responsible for the index mapping. It will take any coordinate within the shape, compute the equivalent natural coordinate for that shape (if it is not already), and compute the inner product with the strides.
```cpp
auto shape = Shape <_3,Shape< _2,_3>>{};
auto stride = Stride<_3,Stride<_12,_1>>{};
print(crd2idx( 16, shape, stride)); // 17
print(crd2idx(_16{}, shape, stride)); // _17
print(crd2idx(make_coord( 1, 5), shape, stride)); // 17
print(crd2idx(make_coord(_1{}, 5), shape, stride)); // 17
print(crd2idx(make_coord(_1{},_5{}), shape, stride)); // _17
print(crd2idx(make_coord( 1,make_coord( 1, 2)), shape, stride)); // 17
print(crd2idx(make_coord(_1{},make_coord(_1{},_2{})), shape, stride)); // _17
```
## Layout Manipulation
### Sublayouts
Sublayouts can be retrieved with `layout<I...>`
```cpp
Layout a = Layout<Shape<_4,Shape<_3,_6>>>{}; // (4,(3,6)):(1,(4,12))
Layout a0 = layout<0>(a); // 4:1
Layout a1 = layout<1>(a); // (3,6):(4,12)
Layout a10 = layout<1,0>(a); // 3:4
Layout a11 = layout<1,1>(a); // 6:12
```
or `select<I...>`
```cpp
Layout a = Layout<Shape<_2,_3,_5,_7>>{}; // (2,3,5,7):(1,2,6,30)
Layout a13 = select<1,3>(a); // (3,7):(2,30)
Layout a01 = select<0,1,3>(a); // (2,3,7):(1,2,30)
Layout a2 = select<2>(a); // (5):(6)
```
or `take<ModeBegin, ModeEnd>`
```cpp
Layout a = Layout<Shape<_2,_3,_5,_7>>{}; // (2,3,5,7):(1,2,6,30)
Layout a13 = take<1,3>(a); // (3,5):(2,6)
Layout a14 = take<1,4>(a); // (3,5,7):(2,6,30)
// take<1,1> not allowed. Empty layouts not allowed.
```
### Concatenation
A `Layout` can be provided to `make_layout` to wrap and concatenate
```cpp
Layout a = Layout<_3,_1>{}; // 3:1
Layout b = Layout<_4,_3>{}; // 4:3
Layout row = make_layout(a, b); // (3,4):(1,3)
Layout col = make_layout(b, a); // (4,3):(3,1)
Layout q = make_layout(row, col); // ((3,4),(4,3)):((1,3),(3,1))
Layout aa = make_layout(a); // (3):(1)
Layout aaa = make_layout(aa); // ((3)):((1))
Layout d = make_layout(a, make_layout(a), a); // (3,(3),3):(1,(1),1)
```
or can be combined with `append`, `prepend`, or `replace`.
```cpp
Layout a = Layout<_3,_1>{}; // 3:1
Layout b = Layout<_4,_3>{}; // 4:3
Layout ab = append(a, b); // (3,4):(1,3)
Layout ba = prepend(a, b); // (4,3):(3,1)
Layout c = append(ab, ab); // (3,4,(3,4)):(1,3,(1,3))
Layout d = replace<2>(c, b); // (3,4,4):(1,3,3)
```
### Grouping and flattening
Layout modes can be grouped with `group<ModeBegin, ModeEnd>` and flattened with `flatten`.
```cpp
Layout a = Layout<Shape<_2,_3,_5,_7>>{}; // (_2,_3,_5,_7):(_1,_2,_6,_30)
Layout b = group<0,2>(a); // ((_2,_3),_5,_7):((_1,_2),_6,_30)
Layout c = group<1,3>(b); // ((_2,_3),(_5,_7)):((_1,_2),(_6,_30))
Layout f = flatten(b); // (_2,_3,_5,_7):(_1,_2,_6,_30)
Layout e = flatten(c); // (_2,_3,_5,_7):(_1,_2,_6,_30)
```
Grouping, flattening, and reordering modes allow the reinterpretation of tensors in place as matrices, matrices as vectors, vectors as matrices, etc.
### Slicing
`Layout`s can be sliced, but slicing is more appropriate to perform on `Tensor`s. See the [`Tensor` section](./03_tensor.md) for slicing details.
## Summary
* The `Shape` of a `Layout` defines its coordinate space(s).
* Every `Layout` has a 1-D coordinate space.
This can be used to iterate over the coordinate spaces in a colexicographical order.
* Every `Layout` has an R-D coordinate space,
  where R is the rank of the layout.
  The colexicographical enumeration of the R-D coordinates
  corresponds to the 1-D coordinates above.
* Every `Layout` has an h-D (natural) coordinate space where h is "hierarchical." These are ordered colexicographically and the enumeration of that order corresponds to the 1-D coordinates above. A natural coordinate is *congruent* to the `Shape` so that each element of the coordinate has a corresponding element of the `Shape`.
* The `Stride` of a `Layout` maps coordinates to indices.
* The inner product of the elements of the natural coordinate with the elements of the `Stride` produces the resulting index.
For each `Layout` there exists an integral `Shape` that is compatible with that `Layout`. Namely, that integral shape is `size(layout)`. We can then observe that
> Layouts are functions from integers to integers.
If you're familiar with the C++23 feature `mdspan`,
this is an important difference between
`mdspan` layout mappings and CuTe `Layout`s. In CuTe, `Layout` is a first class citizen, is natively hierarchical to naturally represent functions beyond row-major and column-major, and can similarly be indexed with a hierarchy of coordinates.
(`mdspan` layout mappings can represent hierarchical functions as well,
but this requires defining a custom layout.)
Input coordinates for an `mdspan` must have the same shape as the `mdspan`;
a multidimensional `mdspan` does not accept 1-D coordinates.

[README](../../README.md#documentation) > **Grouped Kernel Schedulers**
# CUTLASS Grouped Kernel Schedulers
CUTLASS's grouped kernel is a persistent kernel which launches multiple problems (e.g., GEMMs, SYR2Ks) within a
single CUDA kernel launch.
Unlike conventional GEMMs in CUTLASS, which launch a number of threadblocks equal to the number
of tiles in the GEMM, CUTLASS grouped kernels typically launch a number of threadblocks that is
fewer than the total number of tiles across all problems in the group. Each threadblock is then
responsible for computing one or more tiles among the problems in the group. The grouped kernel
_scheduler_ (referred to as the _problem visitor_ in code) is responsible for assigning each
threadblock the sequence of tiles that it will compute within the group.
This document provides background on the functionality of the grouped kernel scheduler, and describes
various optimizations to the grouped kernel scheduler.
**Outline**
* [Introduction to Grouped Kernel Schedulers](grouped_scheduler.md#introduction-to-grouped-kernel-schedulers)
* [Grouped GEMM Scheduler](grouped_scheduler.md#grouped-gemm-scheduler)
* [Grouped Rank2K Scheduler](grouped_scheduler.md#grouped-rank2k-scheduler)
* [Scheduler Modes](grouped_scheduler.md#scheduler-modes)
* [Improving Load Balance by Sorting Problems](grouped_scheduler.md#improving-load-balance-by-sorting-problems)
# Introduction to Grouped Kernel Schedulers
Given a group of problem sizes and a grid of threadblocks, the scheduler's job is to assign
tiles from problems in the group to threadblocks. Threadblocks in a grouped kernel persistently
execute a loop of querying the scheduler for the next tile to compute and performing the
kernel-level operations for that tile (e.g., MMA and epilogue). In pseudocode, this looks as
follows:
```c++
ProblemVisitor problem_visitor;
while (problem_visitor.next_tile()) {
//
// Get next tile index from scheduler
//
//
// Compute MMA and epilogue
//
// Inform the scheduler that we are done with the current tile
problem_visitor.advance(gridDim.x);
}
```
The key functionality of the grouped kernel scheduler lies in the `next_tile()` method,
which determines which tile in the group the calling threadblock should compute next, if any.
# Grouped GEMM Scheduler
The scheduler used by grouped GEMM assigns tiles in the group to threadblocks in a round-robin
fashion.
Consider, for example, the threadblock-to-tile mapping that occurs for a group of four GEMMs
each consisting of a grid of 2x2 tiles. Suppose that eight threadblocks are launched. The
figure below illustrates the threadblock ID assigned to each tile in each GEMM in the group.

A similar mapping for problems that do not have the same number of tiles
is shown below:

## Computing the schedule for a given block
Each threadblock in the grouped GEMM computes its own schedule by calling
the `next_tile()` method described above.
To do this, the threadblock's `ProblemVisitor` maintains a `tile_idx`
member that is initialized to `blockIdx.x` and is incremented by
`gridDim.x` between each tile computed (only the x dimension is used
in the launch configuration for grouped kernels). The scheduler must
then figure out which GEMM in the group `tile_idx` belongs to, and which tile
within that problem it maps to.
1. **Determining which GEMM `tile_idx` maps to:** The scheduler determines
the GEMM to which `tile_idx` belongs by iterating through GEMMs starting with
the most-recently visited GEMM, and adding the number of tiles within that
GEMM to a running variable `problem_tile_start`. The scheduler has found the
correct problem for this tile when `problem_tile_start <= tile_idx < problem_tile_start + tiles_in_problem`.
2. **Determining the tile within a GEMM `tile_idx` maps to:** Once the GEMM
to which `tile_idx` maps has been located, the specific tile within that
GEMM that this block should compute is given by `tile_idx - problem_tile_start`.
Simple rasterization is then performed to map this one-dimensional tile ID
into the two-dimensional coordinate of the tile to compute in the GEMM.
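A minimal sketch of this search is shown below. It is illustrative only, not the actual CUTLASS implementation, and `problem_tile_count(p)` is a hypothetical helper returning the number of threadblock tiles in problem `p`.
```c++
int32_t problem_idx        = 0;
int32_t problem_tile_start = 0;

// Step 1: advance until tile_idx falls within the current problem's range of tiles.
while (problem_tile_start + problem_tile_count(problem_idx) <= tile_idx) {
  problem_tile_start += problem_tile_count(problem_idx);
  ++problem_idx;
}

// Step 2: the tile within that problem, which is then rasterized to a 2-D tile coordinate.
int32_t tile_in_problem = tile_idx - problem_tile_start;
```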
We describe how this search is accelerated in [Scheduler Modes](grouped_scheduler.md#scheduler-modes).
# Grouped Rank2K Scheduler
The previous section described the operation of the scheduler used
for grouped GEMM kernels. While this scheduler is sufficient for
correctly implementing grouped Rank2K operations (i.e., SYR2K and HER2K), it leads to significant inefficiencies.
We next describe these inefficiencies as well as how the CUTLASS
grouped Rank2K scheduler overcomes them.
## Inefficiency of grouped GEMM scheduler for grouped Rank2K problems
The grouped GEMM scheduler assumes that every tile in every GEMM in the group will
ultimately affect the output of the problem. This is not the case for Rank2K
problems, for which matrix C is either upper or lower triangular. Using the default
grouped GEMM scheduler for such problems will thus lead to threadblocks frequently
being assigned to tiles that exit early (e.g., due to being assigned to a tile in the
upper-triangular portion of a lower-triangular problem). This further leads to load
imbalance among threadblocks, as the grouped GEMM scheduler assigns nearly the same
number of tiles to all threadblocks, regardless of how many tiles are truly active.
Consider an example of a group of four SYR2K problems, each with matrix C consisting
of a grid of 2x2 tiles. Matrix C in each problem is lower triangular, indicated by
shaded tiles. Consider that eight threadblocks are launched to compute the grouped
problem. The default grouped GEMM scheduler will assign threadblocks to tiles in the following order:

In this case, threadblocks 1 and 5 are continuously assigned to inactive tiles. In
scenarios in which problems within the group have varying size, we have observed
this to still lead to significant load imbalance.
## Specializing the scheduler for triangular problems
We seek to design a scheduler that more efficiently maps threadblocks to active tiles
for kernels that use triangular output matrices. The scheduler should ideally assign
threadblocks only to those tiles within lower-triangular portion of a
lower-triangular problem (and vice-versa for upper-triangular problems).
Using the example above, the resulting assignment of threadblocks to tiles from
such a scheduler might be:

Achieving this schedule requires mapping from a threadblock ID to tile coordinates
`(i, j)`.
We will illustrate this by mapping a lower-triangular matrix with a 3x3 grid. We
first calculate row and column indices assuming one-indexed rows, tiles, and
threadblock IDs, and then subtract one to convert to zero-indexed versions. Our
description borrows heavily from the mapping described [here](https://stackoverflow.com/a/40954159).

### Calculating row `i` given threadblock ID `t`
For a given row i, all threadblock IDs t in that row satisfy the following:
```
t <= 1 + 2 + 3 + ... + (i-1) + i
```
The closed-form equation for the right-hand side is: `i(i+1)/2`.
Using this, we can solve for `i` given `t`:
```
t <= i(i+1)/2
2t <= i^2 + i
2t <= i^2 + i + 0.25 - 0.25
2t + 0.25 <= i^2 + i + 0.25
2t + 0.25 <= (i + 0.5)^2
sqrt(2t + 0.25) - 0.5 <= i
```
To account for fractional values, we set:
```
i = ceil(sqrt(2t + 0.25) - 0.5)
```
To turn this into a zero-indexed row and work with zero-indexed `t`, we perform:
```
i = ceil(sqrt(2(t+1) + 0.25) - 0.5) - 1
= ceil(sqrt(2t + 2.25) - 0.5) - 1
```
### Calculating column `j` given threadblock ID `t` and row `i`
For a given row `i`, all threadblock IDs `t` in that row also satisfy the following:
```
t > 1 + 2 + 3 + ... + (i-2) + (i-1)
--> t > i(i-1)/2
```
Threadblock IDs within a given row are sequential, so the one-indexed column ID
for one-indexed threadblock ID `t` and row `i` is:
```
j = t - (i(i-1)/2)
```
The zero-indexed version becomes:
```
j = (t+1) - (i(i+1)/2) - 1
= t - (i(i+1)/2)
```
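Putting the two calculations together, a sketch of the zero-indexed mapping (illustrative only; it ignores the floating-point robustness concerns a production implementation would need to address):
```c++
#include <cmath>

// Zero-indexed (i, j) for zero-indexed threadblock ID t in a lower-triangular grid.
void lower_triangular_coords(int t, int &i, int &j) {
  i = static_cast<int>(std::ceil(std::sqrt(2.0 * t + 2.25) - 0.5)) - 1;
  j = t - (i * (i + 1)) / 2;
}
```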
### Accounting for non-square grids
Though the overall output problem size for Rank2K problems is guaranteed to be square, the
grids used in computing may not be square due to using non-square threadblock shapes. For
example, a threadblock shape of 64x32 operating on a problem of output size 128x128 would
result in a grid of 2x4 tiles.
This case can be handled by noting that the output resembles a square grid of 2x2 "macro tiles"
each of which contains 2 "true tiles." We can thus first map a threadblock ID to its "macro tile"
using the equations above, and then map it to the "true tile" within its "macro tile." In the example
of a 2x4 grid, this mapping would look as follows:

A zero-indexed threadblock ID `t` is mapped to its "macro tile ID" `t_macro` as:
```
t_macro = t // r
```
Where `r` is the ratio of the maximum dimension of the grid to the
minimum dimension of the grid (i.e., `r = 4 / 2 = 2` in the previous example).
One uses `t_macro` and the calculations above to find the row and column in the square matrix to
obtain `i_macro` and `j_macro` (zero-indexed). The mapping from `(i_macro, j_macro) --> (i, j)`
is simply the following:
```
if (ThreadblockShape::M > ThreadblockShape::N):
r = ThreadblockShape::M / ThreadblockShape::N
i = i_macro
j = (j_macro * r) + (t % r)
elif (ThreadblockShape::M < ThreadblockShape::N):
r = ThreadblockShape::N / ThreadblockShape::M
i = (i_macro * r) + (t % r)
j = j_macro
else:
i = i_macro
j = j_macro
```
### Handling cases with grid dimensions that aren't multiples of each other
Even though threadblock shapes M and N are typically multiples of one another, the grid
for a given problem may not have dimensions of the same ratio as that of the threadblock.
For example, a problem of size 132x132 using a threadblock of shape 64x32 will result
in a grid of 3x5 tiles. In this case, there is not an integer number of "true tiles"
per "macro tile."
When this scenario arises, we simply pad the larger dimension of the grid such that
there are an integer number of "true tiles" per "macro tile." Thus, the 3x5 grid in
the example above will be treated as a 3x6 grid. Row and column positions for each
tile are calculated as above. Any threadblocks that map to tiles that are outside the
problem range or upper/lower triangular portion (e.g., (2, 5)) will exit early from
this problem and may proceed to the next problem in the group.
### Handling upper-triangular matrices
The only modification needed for upper-triangular matrices is to swap `i_macro` and `j_macro` in the calculations above.
# Scheduler modes
The grouped kernel schedulers come with two different modes for finding
the next tile for a block to compute. These techniques are controlled by
the [`cutlass::gemm::kernel::GroupScheduleMode`](../../include/cutlass/gemm/kernel/grouped_problem_visitor.h) enum.
We describe each mode in greater detail below.
## `GroupScheduleMode::kDeviceOnly` (default)
This scheduler mode performs all scheduling work on the device. It parallelizes
the search for the problem that `tile_idx` maps to by having each thread "own"
a different problem and determine whether `tile_idx` falls within the range of
that problem.
`GroupScheduleMode::kDeviceOnly` performs this parallelization in a warp-wide
fashion. Each thread in the warp loads a problem size indexed by its lane id and
computes the number of tiles in that problem. A warp-wide prefix sum is used to find
the starting tiles for the set of problems the warp is looking at. At the end of the
prefix sum, each thread holds the starting tile index and tile count for a unique
problem in the group.
While `tile_idx` remains within the range of the problems currently hosted by the
warp, each thread will check whether `tile_idx` is in the range of its current
problem. The matching problem index and its starting tile are then broadcasted to all
threads in the warp.
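A rough sketch of this warp-wide search is shown below. It is illustrative only, not the CUTLASS implementation: `lane_problem_tiles` is a hypothetical value holding the tile count of the problem assigned to this lane, `tile_idx` is the tile being searched for (as in the pseudocode at the top of this document), and starting/ending tiles are relative to the first problem currently hosted by the warp.
```c++
int lane  = threadIdx.x % 32;
int tiles = lane_problem_tiles;        // tiles in the problem owned by this lane

// Warp-wide inclusive prefix sum gives the ending tile of each lane's problem.
int end = tiles;
for (int offset = 1; offset < 32; offset *= 2) {
  int n = __shfl_up_sync(0xffffffff, end, offset);
  if (lane >= offset) { end += n; }
}
int start = end - tiles;               // starting tile of this lane's problem

// Each lane tests whether tile_idx falls within its problem's range ...
bool match = (start <= tile_idx) && (tile_idx < end);

// ... and the matching lane broadcasts its problem's starting tile to the warp.
unsigned ballot = __ballot_sync(0xffffffff, match);
int matching_lane  = __ffs(ballot) - 1;
int matching_start = __shfl_sync(0xffffffff, start, matching_lane);
```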
## Precomputing schedules on the host: `GroupScheduleMode::kHostPrecompute`
This scheduler attempts to reduce the amount of scheduling performed on the device
by precomputing on the host the sequence of problems that will
be accessed by each block. As described above, all that is needed to map `tile_idx` to
the specific tile within a problem to compute is the problem ID and the problem's
starting tile (among all of the tiles in the group). Thus, this scheduler precomputes
the problem index and problem starting tile for each tile computed by each block.
The schedule for an individual block is represented as an array of
`(problem_idx, problem_starting_tile)` tuples. There is one such array per block.
These arrays are produced on the host and copied over to the device. This
representation is optimized for the case in which blocks compute at most one
tile per problem. When a block computes multiple tiles per problem in the group,
the representation above will result in duplicate entries, and thus will be
suboptimal (e.g., `[(3, 20), (3, 20)]` for a block that computes two tiles in
problem 3, which has starting tile index 20).
We have chosen to use the representation described above because grouped kernels
themselves are typically most beneficial when problem sizes are small, and, thus,
blocks compute at most one tile per problem.
## Which scheduler mode should I use?
Consider the following questions when deciding which scheduling mode to use:
### How are the parameters used as input to the grouped kernel (e.g., ptrA, lda) set in my application?
If these are set by a previous kernel running on
the device (rather than by the host), you likely want to use `kDeviceOnly`,
as this will minimize additional host-device communication.
### Can host-side work be overlapped with other device kernels in my application?
For example, if a grouped GEMM is used as the Nth layer in a neural network,
host-side precomputation for the grouped GEMM can potentially be overlapped
with device-side work for layer N-1. In this case `kHostPrecompute` is likely
a good fit.
### How compute-intensive are the problems in my group?
The differences in performance between `kHostPrecompute` and `kDeviceOnly` are most
noticeable for grouped kernels with low computational intensity, for which time spent in
the scheduler accounts for a significant fraction of the grouped kernel's runtime.
Intuitively, as problems in a group decrease in computational intensity, a smaller
fraction of the overall runtime will be consumed in performing MMA operations, leading
to a larger fraction of the overall runtime being consumed by scheduling logic.
Since the scheduling modes affect only the scheduling logic of the grouped kernels,
one expects to see most benefit from `kHostPrecompute` for less computationally-intense
groups.
# Improving Load Balance by Sorting Problems
The grouped kernel schedulers assign a nearly equal number
of tiles to each block participating in the grouped kernel. Every tile in the
group has the same M and N dimensions. However, the K dimension of each
tile depends on the K dimension of the problem, so tiles may have different
K dimensions. Thus, the K dimension of the
tile plays a significant role in determining how long it takes for a given
tile to be computed.
## Potential problems with imbalanced K dimension
To ensure that compute load is balanced evenly across blocks, it is important
that the sum of the K dimensions among all tiles a block computes be similar
to that of other blocks; if one block computes far more tiles with a large
value of K than other blocks, it may take longer than the other blocks.
For example, consider the following group of GEMMs:
```
0 1152x768x128
1 1152x768x1024
2 768x1152x128
3 768x1152x1024
```
If a tile size of 128x128 is used, then each problem will have 54 tiles.
Thus, there are 216 tiles across the group.
Suppose this grouped GEMM is run on GA100, which has 108 SMs. Suppose that
the occupancy given the parameters of the grouped GEMM is one -- one threadblock
can be active at a time on an SM. The grouped GEMM will, thus, run with 108
persistent threadblocks, each of which computes (216 / 108) = 2 tiles.
Under the round-robin assignment of tiles to threadblocks employed by
the grouped GEMM scheduler, the assignment of tiles to threadblocks
in this GEMM will be as follows:
```
Threadblocks 0-53: Tiles of size 128x128x128 from problem 0
Threadblocks 54-107: Tiles of size 128x128x1024 from problem 1
Threadblocks 0-53: Tiles of size 128x128x128 from problem 2
Threadblocks 54-107: Tiles of size 128x128x1024 from problem 3
```
Following this assignment, threadblocks 54-107 perform significantly more
work than threadblocks 0-53 because they compute two tiles with a K
dimension of 1024, whereas threadblocks 0-53 compute two tiles with K
dimension of only 128.
Due to this imbalanced assignment, threadblocks 54-107 will run
significantly longer than threadblocks 0-53, leaving threadblocks
0-53 idle for a large fraction of time.
Clearly, a better assignment of tiles to threadblocks for this
example would involve all threadblocks computing one tile with
a K dimension of 1024 and one tile with a K dimension of 128.
This would better balance the workload among threadblocks.
## Potential for sorting problems to reduce imbalance
A simple way to potentially reduce load imbalance is to sort the problems in a group in
descending order of their K dimension. This can help to improve load balance
because tiles in a group are assigned in a round-robin fashion to blocks
sequentially, so every block will always be assigned next the tile with
the highest K dimension available.
Considering the example described above, sorting the problem sizes before
executing grouped GEMM improves the runtime of this grouped GEMM on GA100 with each
scheduling mode by around 30%.
To ease the process of sorting groups and their associated metadata in this
manner, the device-level grouped kernels provide a `sort_problems()` method.
An example of how to use this may be found in the [grouped GEMM example](../../examples/24_gemm_grouped/gemm_grouped.cu).
Finally, while sorting problems can be helpful in certain scenarios, it is
not guaranteed to improve performance. In some cases, performance can
decrease when sorting problems due to additional conflicting factors that
affect GEMM performance. We recommend profiling your grouped kernel with
and without sorting to see whether it helps in your case.
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
from cutlass_library import (
DataType,
KernelScheduleType,
TileSchedulerType
)
from cutlass.backend.library import DataTypeSizeBytes
class GemmCoord_(ctypes.Structure):
_fields_ = [
("m", ctypes.c_int),
("n", ctypes.c_int),
("k", ctypes.c_int)
]
def __init__(self, m, n, k) -> None:
self.m = m
self.n = n
self.k = k
class GemmCoordBatched_(ctypes.Structure):
"""
Wrapper around a GemmCoord that also contains batch count. This is used for encoding
batched GEMM inputs to CUTLASS 3 GEMMs.
"""
_fields_ = [
("m", ctypes.c_int),
("n", ctypes.c_int),
("k", ctypes.c_int),
("batch_count", ctypes.c_int)
]
def __init__(self, gemm_coord, batch_count) -> None:
self.m = gemm_coord.m
self.n = gemm_coord.n
self.k = gemm_coord.k
self.batch_count = batch_count
class MatrixCoord_(ctypes.Structure):
_fields_ = [
("row", ctypes.c_int),
("column", ctypes.c_int)
]
class dim3_(ctypes.Structure):
_fields_ = [
("x", ctypes.c_int),
("y", ctypes.c_int),
("z", ctypes.c_int)
]
class StrideBatched_(ctypes.Structure):
"""
CUTLASS 3.0 strides for operands contain one static dimension and two variable dimensions. The
variable dimensions represent the stride along non-unit-stride dimension of the row/column major
layout, and the batch stride. This structure encodes the two variable dimensions.
"""
_fields_ = [
("major_stride", ctypes.c_int64),
("batch_stride", ctypes.c_int64)
]
class GenericMainloopArguments3x_(ctypes.Structure):
"""
Structure representing the superset of possible mainloop arguments.
This structure should not be passed to kernels directly, but, rather,
be used as an input to one of the more specific schedule arguments, which
will each select those arguments relevant to the particular schedule.
"""
_fields_ = [
("ptr_A", ctypes.c_void_p),
("stride_A", StrideBatched_),
("ptr_B", ctypes.c_void_p),
("stride_B", StrideBatched_),
("mma_promotion_interval", ctypes.c_int)
]
class _PersistentTileSchedulerArguments(ctypes.Structure):
_fields_ = [
("max_swizzle_size", ctypes.c_int),
("raster_order_option", ctypes.c_int),
]
class _PersistentTileSchedulerStreamKArguments(ctypes.Structure):
_fields_ = [
("splits", ctypes.c_int),
("max_swizzle_size", ctypes.c_int),
("raster_order_option", ctypes.c_int),
("reduction_mode", ctypes.c_int),
("decomposition_mode", ctypes.c_int),
]
def get_tile_scheduler_arguments_3x(
tile_scheduler: TileSchedulerType,
splits: int = 1):
max_swizzle_size = 1
raster_order_option = 0 # Heuristic
if tile_scheduler == TileSchedulerType.Persistent:
return _PersistentTileSchedulerArguments(
max_swizzle_size,
raster_order_option,
)
elif tile_scheduler == TileSchedulerType.StreamK:
reduction_mode = 0 # Deterministic
decomposition_mode = 0 # Heuristic
return _PersistentTileSchedulerStreamKArguments(
splits,
max_swizzle_size,
raster_order_option,
reduction_mode,
decomposition_mode,
)
def get_mainloop_arguments_3x(
kernel_schedule: KernelScheduleType,
element_A,
element_B,
alignment_A: int,
alignment_B: int) -> ctypes.Structure:
"""
Returns the ctypes structure to be used for the 3.x kernel's mainloop parameters.
:param kernel_schedule: type of kernel schedule to be used in the mainloop
:type kernel_schedule: cutlass_library.KernelScheduleType
:param element_A: data type of operand A
:param element_B: data type of operand B
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:returns: ctypes structure to be used for the 3.x kernel's mainloop parameters
:rtype: ctypes.Structure
"""
class _MainloopArgumentsTma(ctypes.Structure):
_fields_ = [
("ptr_A", ctypes.c_void_p),
("stride_A", StrideBatched_),
("ptr_B", ctypes.c_void_p),
("stride_B", StrideBatched_),
("mma_promotion_interval", ctypes.c_int)
]
@staticmethod
def from_generic_mainloop_args(args: GenericMainloopArguments3x_):
return _MainloopArgumentsTma(
args.ptr_A, args.stride_A, args.ptr_B, args.stride_B,
args.mma_promotion_interval
)
class _MainloopArgumentsMultistage(ctypes.Structure):
_fields_ = [
("ptr_A", ctypes.c_void_p),
("stride_A", StrideBatched_),
("ptr_B", ctypes.c_void_p),
("stride_B", StrideBatched_),
]
@staticmethod
def from_generic_mainloop_args(args: GenericMainloopArguments3x_):
return _MainloopArgumentsMultistage(
args.ptr_A, args.stride_A, args.ptr_B, args.stride_B,
)
# Currently all 3.x kernels (CpAsync and Tma) have the same argument structure.
# Should that become not the case, this is the place to return custom ctypes
# structures based on selected kernel schedule.
return _MainloopArgumentsTma
def get_gemm_arguments_3x(mainloop_arguments, epilogue_functor, scheduler_args, default_epilogue):
if not default_epilogue and hasattr(epilogue_functor, "epilogue_type_evt"):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type_evt
else:
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
if hasattr(epilogue_functor, "visitor"):
class _EpilogueArguments(ctypes.Structure):
_fields_ = [
("epilogue", _EpilogueOutputOpParams),
("arg_C", epilogue_functor.arg_c_type),
("arg_D", epilogue_functor.arg_d_type)
]
def __init__(self, output_op, ptr_c, stride_c, ptr_d, stride_d) -> None:
self.epilogue = output_op
self.arg_C = epilogue_functor.arg_c_type(ptr_c)
self.arg_D = epilogue_functor.arg_d_type(ptr_d)
else:
class _EpilogueArguments(ctypes.Structure):
_fields_ = [
("epilogue", _EpilogueOutputOpParams),
("ptr_C", ctypes.c_void_p),
("stride_C", StrideBatched_),
("ptr_D", ctypes.c_void_p),
("stride_D", StrideBatched_),
]
class _HardwareInfo(ctypes.Structure):
_fields_ = [
("device_id", ctypes.c_int),
("sm_count", ctypes.c_int),
]
class _GemmArguments(ctypes.Structure):
_fields_ = [
("mode", ctypes.c_int),
("problem_size", GemmCoordBatched_),
("mainloop", mainloop_arguments),
("epilogue", _EpilogueArguments),
("hw_info", _HardwareInfo),
("scheduler", type(scheduler_args)),
]
return _GemmArguments, _EpilogueArguments, _EpilogueOutputOpParams, _HardwareInfo
def get_gemm_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GemmArguments(ctypes.Structure):
_fields_ = [
# Arguments from UniversalArgumentsBase
("mode", ctypes.c_int),
("problem_size", GemmCoord_),
("batch_count", ctypes.c_int),
("batch_stride_D", ctypes.c_longlong),
# Remaining arguments
("epilogue", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("batch_stride_A", ctypes.c_longlong),
("batch_stride_B", ctypes.c_longlong),
("batch_stride_C", ctypes.c_longlong),
("stride_a", ctypes.c_longlong),
("stride_b", ctypes.c_longlong),
("stride_c", ctypes.c_longlong),
("stride_d", ctypes.c_longlong),
("lda", ctypes.c_longlong),
("ldb", ctypes.c_longlong),
("ldc", ctypes.c_longlong),
("ldd", ctypes.c_longlong),
("ptr_gather_A_indices", ctypes.c_void_p),
("ptr_gather_B_indices", ctypes.c_void_p),
("ptr_scatter_D_indices", ctypes.c_void_p)
]
return _GemmArguments, _EpilogueOutputOpParams
def get_gemm_arguments_streamk(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GemmArguments(ctypes.Structure):
_fields_ = [
("mode", ctypes.c_int),
("problem_size", GemmCoord_),
("batch_count", ctypes.c_int),
("epilogue", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("batch_stride_A", ctypes.c_longlong),
("batch_stride_B", ctypes.c_longlong),
("batch_stride_C", ctypes.c_longlong),
("batch_stride_D", ctypes.c_longlong),
("stride_a", ctypes.c_longlong),
("stride_b", ctypes.c_longlong),
("stride_c", ctypes.c_longlong),
("stride_d", ctypes.c_longlong),
("lda", ctypes.c_longlong),
("ldb", ctypes.c_longlong),
("ldc", ctypes.c_longlong),
("ldd", ctypes.c_longlong),
("avail_sms", ctypes.c_int)
]
return _GemmArguments, _EpilogueOutputOpParams
###########################################################################################
# GEMM Grouped
###########################################################################################
def get_gemm_grouped_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GEMMGroupedArguments(ctypes.Structure):
_fields_ = [
("problem_sizes", ctypes.c_void_p),
("problem_count", ctypes.c_int),
("threadblock_count", ctypes.c_int),
("output_op", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("lda", ctypes.c_void_p),
("ldb", ctypes.c_void_p),
("ldc", ctypes.c_void_p),
("ldd", ctypes.c_void_p),
("host_problem_sizes", ctypes.c_void_p)
]
return _GEMMGroupedArguments, _EpilogueOutputOpParams
############################################################################################
# Convolution2D
############################################################################################
class Conv2DProblemSize_(ctypes.Structure):
_fields_ = [
("N", ctypes.c_int),
("H", ctypes.c_int),
("W", ctypes.c_int),
("C", ctypes.c_int),
("P", ctypes.c_int),
("Q", ctypes.c_int),
("K", ctypes.c_int),
("R", ctypes.c_int),
("S", ctypes.c_int),
("pad_h", ctypes.c_int),
("pad_w", ctypes.c_int),
("stride_h", ctypes.c_int),
("stride_w", ctypes.c_int),
("dilation_h", ctypes.c_int),
("dilation_w", ctypes.c_int),
("mode", ctypes.c_int), # kCrossCorrelation: 0, kConvolution: 1
("split_k_slices", ctypes.c_int),
("groups", ctypes.c_int)
]
def __init__(self, problem_size) -> None:
for field_name, _ in self._fields_:
setattr(self, field_name, getattr(problem_size, field_name))
class Layout4D(ctypes.Structure):
_fields_ = [("stride", ctypes.c_int * 3)]
def __init__(self, tensor_ref):
stride = tensor_ref.stride()
setattr(self, "stride", (stride.at(0), stride.at(1), stride.at(2)))
class TensorRef_(ctypes.Structure):
_fields_ = [
("ptr", ctypes.c_void_p),
("layout", Layout4D)
]
def __init__(self, tensor_ref):
setattr(self, "ptr", tensor_ref.data())
setattr(self, "layout", Layout4D(tensor_ref.layout()))
class TensorRef2D_(ctypes.Structure):
_fields_ = [
("ptr", ctypes.c_void_p),
("stride", ctypes.c_int)
]
def get_conv2d_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _Conv2dArguments(ctypes.Structure):
_fields_ = [
("conv_kind", ctypes.c_int),
("problem_size", Conv2DProblemSize_),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("tensor_C_numel", ctypes.c_int),
("output_op", _EpilogueOutputOpParams),
("split_k_mode", ctypes.c_int)
]
return _Conv2dArguments, _EpilogueOutputOpParams
############################################################################################
# Reduction
############################################################################################
def get_reduction_params(epilogue_functor):
_EpilogueOutputParams = epilogue_functor.epilogue_type
class _ReductionParams(ctypes.Structure):
_fields_ = [
("problem_size", MatrixCoord_),
("partitions", ctypes.c_int),
("partition_stride", ctypes.c_longlong),
("workspace", TensorRef2D_),
("destination", TensorRef2D_),
("source", TensorRef2D_),
("output_op", _EpilogueOutputParams),
]
return _ReductionParams, _EpilogueOutputParams
###########################################################################################
# Epilogue Visitor Type Factory
###########################################################################################
class Empty(ctypes.Structure):
_fields_ = []
def __init__(self, *arg) -> None:
pass
class EmptyByte(ctypes.Structure):
_fields_ = [
("byte", ctypes.c_byte)
]
def __init__(self, *arg) -> None:
pass
class EBO:
def __init__(self, index: int, type) -> None:
self.index = index
self.type = type
def __eq__(self, other) -> bool:
if isinstance(other, EBO):
return self.index == other.index and self.type == other.type
return False
def __hash__(self) -> int:
return hash((self.index, self.type))
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self) -> str:
return f"<{self.index}, {self.type}>"
def tuple_factory_(input_tuple, dtype, constants=[0,1]):
"""
The factory function generating cute::Tuple with input tuple
:param input_tuple: the input tuple
:type input_tuple: tuple
:param dtype: the data type for non-constant values
:type dtype: str, "int32_t", "int", "int64_t"
:param constants: the values that will be treated as constants
:type constants: list[int]
:return: the ctypes structure representing the cute::Tuple and the list of
    its empty base classes
"""
# The empty base classes of the current tuple
empty_bases = []
# The first non empty base class
first_non_empty_base = None
# The ctype fields of the current tuple
ctype_fields = []
for idx, entry in enumerate(input_tuple):
# For nested tuples
if isinstance(entry, tuple):
sub_tuple_ctype, sub_empty_bases = tuple_factory_(entry, dtype, constants)
if ctypes.sizeof(sub_tuple_ctype) == 0:
# The empty tuple base class is also an empty EBO
empty_bases.append(EBO(idx, entry))
else:
if first_non_empty_base is None:
first_non_empty_base = sub_empty_bases
ctype_fields.append((f"entry_{idx}", sub_tuple_ctype))
else:
if entry in constants:
empty_bases.append(EBO(idx, entry))
ctype_fields.append((f"entry_{idx}", Empty))
else:
ctype_fields.append((f"entry_{idx}", dtype))
if first_non_empty_base is None:
first_non_empty_base = []
# Determine whether or not to add an additional byte for empty base classes
additional_byte = False
# Special case for constant tuple
if first_non_empty_base is None:
additional_byte = False
else:
for base in first_non_empty_base:
if base in empty_bases:
additional_byte = True
break
if additional_byte:
ctype_fields = [("empty_byte", EmptyByte), ] + ctype_fields
# Create the ctype tuple
class TupleType(ctypes.Structure):
_fields_ = ctype_fields
def __init__(self, args) -> None:
if additional_byte:
fields = self._fields_[1:]
else:
fields = self._fields_
assert len(fields) == len(args)
for field, arg in zip(fields, args):
name = field[0]
field_type = field[1]
setattr(self, name, field_type(arg))
return TupleType, empty_bases
def tuple_factory(input_tuple, dtype: str, constants=[0,1]):
"""
The factory function generating cute::Tuple with input tuple
:param input_tuple: the input tuple
:type input_tuple: tuple
:param dtype: the data type for non-constant values
:type dtype: str, "int32_t", "int", "int64_t"
:param constants: the values that will be treated as constants
:type constants: list[int]
:return: the ctypes structure representing the cute::Tuple
"""
# Step 1: convert the dtype
if dtype == "int64_t":
dtype = ctypes.c_longlong
elif dtype in ["int", "int32_t"]:
dtype = ctypes.c_int32
else:
raise NotImplementedError(f"Type {dtype} is not supported")
tuple_type, _ = tuple_factory_(input_tuple, dtype, constants)
if ctypes.sizeof(tuple_type) == 0:
return EmptyByte
return tuple_type
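# Illustrative sketch (stride values are assumptions): mirror a cute tuple
# (2048, _1, 1048576) in ctypes; with the default constants [0, 1], the middle
# entry becomes an empty (static) field, so only the dynamic strides occupy storage.
def _example_tuple_factory():
    stride_type = tuple_factory((2048, 1, 1048576), "int64_t")
    return stride_type((2048, 1, 1048576))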
def visitor_factory(node_types, node_names):
"""
Creates the argument type of epilogue visitor type
:param node_types: list of argument types under ctypes
:param node_names: list of argument names under str
:return: tuple type in ctypes.Structure
"""
ctypes_field = []
# A flat struct is used when the number of nodes is at most 4,
# because Sm90VisitorImplBase provides specializations for up to 4 nodes
# in `include/cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp`
if len(node_types) <= 4:
for idx, node_type in enumerate(node_types):
if ctypes.sizeof(node_type) == 0:
# Special case for empty struct
# 1 byte placeholder is used for correct alignment
ctypes_field.append((node_names[idx], ctypes.c_byte))
else:
ctypes_field.append((node_names[idx], node_type))
class VisitorType(ctypes.Structure):
_fields_ = ctypes_field
def __init__(self, kwargs) -> None:
for field in self._fields_:
fname, ftype = field
if ftype != ctypes.c_byte:
setattr(self, fname, ftype(kwargs))
# For cases with more than 4 nodes, tuple is used
else:
for idx, node_type in enumerate(node_types):
ctypes_field.append((node_names[idx], node_type))
class VisitorType(ctypes.Structure):
_fields_ = ctypes_field
def __init__(self, kwargs) -> None:
for field in self._fields_:
fname, ftype = field
setattr(self, fname, ftype(kwargs))
return VisitorType
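# Illustrative sketch (node layout assumed): build the visitor argument struct
# for two hypothetical nodes, one carrying no data and one carrying an int32.
def _example_visitor_args_type():
    return visitor_factory([EmptyByte, ctypes.c_int32], ["bias_args", "alpha_args"])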
| python/cutlass/backend/c_types.py/0 | {
"file_path": "python/cutlass/backend/c_types.py",
"repo_id": "python",
"token_count": 9749
} | 47 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for generating source for building a PyTorch CUDA extension that uses a CUTLASS kernel.
If specified, the extension can be JIT compiled via PyTorch's ``cpp_extension.load`` method.
Example usage with JIT compilation:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass_library.LayoutType.RowMajor)
op = plan.construct()
mod = cutlass.emit.pytorch(op, 'cutlass_gemm', 80, jit=True)
# Generate inputs for the GEMM
A, B, C = [torch.ones((512, 512)).to('cuda') for _ in range(3)]
# Run the module
D = mod.run(A, B, C)
Example usage without JIT compilation:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
op = plan.construct()
cutlass.emit.pytorch(op, 'cutlass_gemm', 80, jit=False, sourcedir='output')
After this call, the directory ``output`` contains ``setup.py``,
``cutlass_gemm.cpp``, and ``cutlass_gemm_kernel.cu``. The module can be built from
within ``output`` by running: ``TORCH_CUDA_ARCH_LIST="8.0" python setup.py develop --user``.
The module can later be used in Python via:
.. highlight:: python
.. code-block:: python
import torch
import cutlass_gemm
# Generate inputs for the GEMM
A, B, C = [torch.ones((512, 512)).to('cuda') for _ in range(3)]
# Run the module
D = cutlass_gemm.run(A, B, C)
"""
import logging
import os
from cutlass_library import ConvKind, ConvKindNames, DataType, SubstituteTemplate
from cutlass import CUTLASS_PATH, logger, swizzle
from cutlass.backend.gemm_operation import GemmOperationGrouped, GemmOperationUniversal
from cutlass.backend.conv2d_operation import Conv2dOperation
from cutlass.backend.library import ApiVersion
from cutlass.emit import common
from cutlass.utils.datatypes import is_torch_available
if is_torch_available():
import torch
_PYTORCH_CUDA_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <cuda_runtime.h>
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include "cutlass/cutlass.h"
#include "cutlass/util/device_memory.h"
// helper function allocating the memory
void* device_memory_allocation(size_t size, int device_id=0) {
if (size > 0) {
torch::Device device(torch::kCUDA, device_id);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
torch::TensorOptions options = torch::TensorOptions().dtype(torch::kI8).device(device);
at::Tensor device_tensor = torch::empty({(long)size,}, options);
return reinterpret_cast<void*>(device_tensor.data_ptr());
} else {
return nullptr;
}
}
${includes}
${declaration}
${impl}
"""
_PYTORCH_GEMM_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt, float alpha=1.f, float beta=0.f);
// C++ interface
at::Tensor ${name}(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt, float alpha=1.f, float beta=0.f) {
return ${name}_kernel(A, B, C, alpha, beta);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run", py::overload_cast<const at::Tensor&, const at::Tensor&, at::optional<const at::Tensor>, float, float>(&${name}), py::arg("A"), py::arg("B"), py::arg("C") = nullptr, py::arg("alpha") = 1.f, py::arg("beta") = 0.f);
}
"""
_PYTORCH_GROUPED_GEMM_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
std::vector<at::Tensor> ${name}_kernel(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C=at::nullopt, float alpha=1.f, float beta=0.f);
// C++ interface
std::vector<at::Tensor> ${name}(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C=at::nullopt, float alpha=1.f, float beta=0.f) {
return ${name}_kernel(A, B, C, alpha, beta);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run", py::overload_cast<const std::vector<at::Tensor>&, const std::vector<at::Tensor>&, at::optional<const std::vector<at::Tensor>>, float, float>(&${name}),
py::arg("A"), py::arg("B"), py::arg("C") = nullptr, py::arg("alpha") = 1.f, py::arg("beta") = 0.f);
}
"""
_PYTORCH_CONV2D_FPROP_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
at::Tensor ${name}_kernel(
const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1);
// C++ interface
at::Tensor ${name}(
const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
return ${name}_kernel(A, B, C, stride, padding, dilation, alpha, beta, split_k_mode, split_k_slices);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run",
py::overload_cast<
const at::Tensor&, const at::Tensor&, at::optional<const at::Tensor>,
std::tuple<int, int>, std::tuple<int, int>, std::tuple<int, int>, float, float, std::string, int>(
&${name}), py::arg("A"), py::arg("B"), py::arg("C") = nullptr,
py::arg("stride") = std::make_tuple(1, 1), py::arg("padding") = std::make_tuple(1, 1), py::arg("dilation") = std::make_tuple(1, 1),
py::arg("alpha") = 1.f, py::arg("beta") = 0.f,
py::arg("split_k_mode") = "serial", py::arg("split_k_slices") = 1);
}
"""
_PYTORCH_CONV2D_GRAD_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
at::Tensor ${name}_kernel(
std::tuple<int, int, int, int> result_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1);
// C++ interface
at::Tensor ${name}(
std::tuple<int, int, int, int> result_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
return ${name}_kernel(result_size, A, B, C, stride, padding, dilation, alpha, beta, split_k_mode, split_k_slices);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run",
py::overload_cast<
std::tuple<int, int, int, int>, const at::Tensor&, const at::Tensor&, at::optional<const at::Tensor>,
std::tuple<int, int>, std::tuple<int, int>, std::tuple<int, int>, float, float, std::string, int>(
&${name}), py::arg("result_size"), py::arg("A"), py::arg("B"), py::arg("C") = nullptr,
py::arg("stride") = std::make_tuple(1, 1), py::arg("padding") = std::make_tuple(1, 1), py::arg("dilation") = std::make_tuple(1, 1),
py::arg("alpha") = 1.f, py::arg("beta") = 0.f,
py::arg("split_k_mode") = "serial", py::arg("split_k_slices") = 1);
}
"""
_PYTORCH_GEMM_INCLUDES = {
ApiVersion.v2x: """
#include "cutlass/gemm/device/gemm_universal.h"
""",
ApiVersion.v3x: """
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/util/packed_stride.hpp"
""",
}
_PYTORCH_GROUPED_GEMM_INCLUDES = """
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
"""
_PYTORCH_CONV2D_INCLUDES = """
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/kernel/default_conv2d_dgrad.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
"""
_CUTLASS_TYPE_TO_TORCH_TYPE = {
DataType.f16: "torch::kF16",
DataType.f32: "torch::kF32",
DataType.f64: "torch::kF64",
DataType.s8: "torch::kI8",
DataType.s32: "torch::kI32",
}
_PYTORCH_GEMM_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_GEMM_2x
+ """
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C, float alpha, float beta) {
int M = A.size(0);
int N = B.size(1);
int K = A.size(1);
typename DeviceKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename DeviceKernel::ElementC*>(C->contiguous().data_ptr());
at::Tensor D = B.new_empty({M, N}, ${torch_type_C});
cutlass::Status status = ${name}_kernel_run(M, N, K,
reinterpret_cast<typename DeviceKernel::ElementA*>(A.contiguous().data_ptr()),
reinterpret_cast<typename DeviceKernel::ElementB*>(B.contiguous().data_ptr()),
ptrC,
reinterpret_cast<typename DeviceKernel::ElementC*>(D.contiguous().data_ptr()),
ElementCompute(alpha), ElementCompute(beta));
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
)
_PYTORCH_GEMM_IMPL_TEMPLATE_3x = (
common._CUTLASS_KERNEL_RUN_GEMM_3x
+ """
bool hw_info_queried = false;
cutlass::KernelHardwareInfo hw_info;
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C, float alpha, float beta) {
int M = A.size(0);
int N = B.size(1);
int K = A.size(1);
int L = 1;
// Query hardware info if we haven't already
if (!hw_info_queried) {
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
hw_info_queried = true;
}
typename DeviceKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename DeviceKernel::ElementC*>(C->contiguous().data_ptr());
at::Tensor D = B.new_empty({M, N}, ${torch_type_C});
cutlass::Status status = ${name}_kernel_run(M, N, K, L,
reinterpret_cast<typename DeviceKernel::ElementA*>(A.contiguous().data_ptr()),
reinterpret_cast<typename DeviceKernel::ElementB*>(B.contiguous().data_ptr()),
ptrC,
reinterpret_cast<typename DeviceKernel::ElementC*>(D.contiguous().data_ptr()),
ElementCompute(alpha), ElementCompute(beta),
hw_info);
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
)
_PYTORCH_GROUPED_GEMM_IMPL_TEMPLATE = (
common._CUTLASS_KERNEL_RUN_GROUPED_GEMM_2x
+ """
std::vector<at::Tensor> ${name}_kernel(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C, float alpha, float beta) {
size_t num = A.size();
// To avoid performing many small cudaMallocs and host-to-device copies,
// we serialize the grouped GEMM arguments on the host, allocate one
// large chunk of device memory, and perform a single cudaMemcpy to
// copy the host data to the device. Allocation overheads could be
// avoided by using a memory pool.
// Calculate the total size of the data to be copied from host to device
size_t total_size = sizeof(cutlass::gemm::GemmCoord) +
sizeof(DeviceKernel::ElementA*) +
sizeof(DeviceKernel::ElementB*) +
sizeof(DeviceKernel::ElementC*) +
sizeof(DeviceKernel::ElementC*) +
sizeof(int64_t) +
sizeof(int64_t) +
sizeof(int64_t);
total_size *= num;
// num * sizeof(cutlass::gemm::GemmCoord) may leave one at a non-multiple
// of sizeof(DeviceKernel::ElementA*) (which will be 8 bytes on a 64-bit system).
// To ensure that we don't end up having misaligned loads in the kernel,
// we pad to the nearest multiple of 8.
//
// Note that, even on a 32-bit system (for which sizeof(X*) will not equal
// sizeof(int64_t)), only padding between the list of GemmCoords and the
// list of ptr_As is sufficient because the set of four equal-length lists of pointers
// (A*, B*, C*, D*) will ensure that the first list of int64_ts will always
// start on a multiple of 8.
int64_t padding = 8 - (total_size % 8);
total_size += padding;
uint8_t* host_data = new uint8_t[total_size];
cutlass::DeviceAllocation<uint8_t> device_data(total_size);
uint8_t* start = host_data;
cutlass::gemm::GemmCoord* problem_sizes_host = reinterpret_cast<cutlass::gemm::GemmCoord*>(start);
// Apply the padding after the list of GemmCoords
start += num * sizeof(cutlass::gemm::GemmCoord) + padding;
int64_t ptr_A_offset = start - host_data;
DeviceKernel::ElementA** ptr_A_host = reinterpret_cast<DeviceKernel::ElementA**>(start);
start += num * sizeof(DeviceKernel::ElementA*);
int64_t ptr_B_offset = start - host_data;
DeviceKernel::ElementB** ptr_B_host = reinterpret_cast<DeviceKernel::ElementB**>(start);
start += num * sizeof(DeviceKernel::ElementB*);
int64_t ptr_C_offset = start - host_data;
DeviceKernel::ElementC** ptr_C_host = reinterpret_cast<DeviceKernel::ElementC**>(start);
start += num * sizeof(DeviceKernel::ElementC*);
int64_t ptr_D_offset = start - host_data;
DeviceKernel::ElementC** ptr_D_host = reinterpret_cast<DeviceKernel::ElementC**>(start);
start += num * sizeof(DeviceKernel::ElementC*);
int64_t lda_offset = start - host_data;
int64_t* lda_host = reinterpret_cast<int64_t*>(start);
start += num * sizeof(int64_t);
int64_t ldb_offset = start - host_data;
int64_t* ldb_host = reinterpret_cast<int64_t*>(start);
start += num * sizeof(int64_t);
int64_t ldc_offset = start - host_data;
int64_t* ldc_host = reinterpret_cast<int64_t*>(start);
start += num * sizeof(int64_t);
std::vector<at::Tensor> D(num);
bool need_C = (C != at::nullopt) && (beta != 0.f);
for (size_t i = 0; i < num; ++i) {
int M = A[i].size(0);
int N = B[i].size(1);
int K = A[i].size(1);
*(problem_sizes_host + i) = {M, N, K};
*(ptr_A_host + i) = reinterpret_cast<typename DeviceKernel::ElementA*>(A[i].contiguous().data_ptr());
*(ptr_B_host + i) = reinterpret_cast<typename DeviceKernel::ElementB*>(B[i].contiguous().data_ptr());
if (need_C) {
*(ptr_C_host + i) = reinterpret_cast<typename DeviceKernel::ElementC*>(C->at(i).contiguous().data_ptr());
}
else {
*(ptr_C_host + i) = nullptr;
}
D[i] = B[i].new_empty({M, N}, ${torch_type_C});
*(ptr_D_host + i) = reinterpret_cast<typename DeviceKernel::ElementC*>(D[i].contiguous().data_ptr());
*(lda_host + i) = DeviceKernel::LayoutA::packed({M, K}).stride(0);
*(ldb_host + i) = DeviceKernel::LayoutB::packed({K, N}).stride(0);
*(ldc_host + i) = DeviceKernel::LayoutC::packed({M, N}).stride(0);
}
device_data.copy_from_host(host_data);
cutlass::Status status = ${name}_kernel_run(
num,
reinterpret_cast<cutlass::gemm::GemmCoord*>(device_data.get()),
reinterpret_cast<DeviceKernel::ElementA**>(device_data.get() + ptr_A_offset),
reinterpret_cast<DeviceKernel::ElementB**>(device_data.get() + ptr_B_offset),
reinterpret_cast<DeviceKernel::ElementC**>(device_data.get() + ptr_C_offset),
reinterpret_cast<DeviceKernel::ElementC**>(device_data.get() + ptr_D_offset),
reinterpret_cast<int64_t*>(device_data.get() + lda_offset),
reinterpret_cast<int64_t*>(device_data.get() + ldb_offset),
reinterpret_cast<int64_t*>(device_data.get() + ldc_offset),
reinterpret_cast<int64_t*>(device_data.get() + ldc_offset),
ElementCompute(alpha), ElementCompute(beta));
delete[] host_data;
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
)
_PYTORCH_CONV2D_IMPL_TEMPLATE_2x = """
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cutlass::Status status = ${name}_kernel_run(
&problem_size,
reinterpret_cast<typename UnderlyingKernel::ElementA*>(A.data_ptr()),
reinterpret_cast<typename UnderlyingKernel::ElementB*>(B.data_ptr()),
ptrC,
reinterpret_cast<typename UnderlyingKernel::ElementC*>(D.data_ptr()),
alpha, beta,
split_k_mode, stream, B.device().index());
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
_PYTORCH_CONV2D_FPROP_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_CONV2D_2x
+ """
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f, std::string split_k_mode="serial", int split_k_slices=1) {
int N, H, W, C_, K, R, S, P, Q;
N = A.size(0);
C_ = A.size(1);
H = A.size(2);
W = A.size(3);
K = B.size(0);
R = B.size(2);
S = B.size(3);
cutlass::conv::Conv2dProblemSize problem_size(
cutlass::Tensor4DCoord(N, H, W, C_),
cutlass::Tensor4DCoord(K, R, S, C_),
cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)),
cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)),
cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)),
cutlass::conv::Mode::kCrossCorrelation,
split_k_slices
);
P = problem_size.P;
Q = problem_size.Q;
typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename UnderlyingKernel::ElementC*>(C->data_ptr());
torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast);
at::Tensor D = torch::zeros({N, K, P, Q}, options);
""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x
)
_PYTORCH_CONV2D_DGRAD_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_CONV2D_2x
+ """
at::Tensor ${name}_kernel(std::tuple<int, int, int, int> input_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1}, float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
int N, H, W, C_, K, R, S;
N = std::get<0>(input_size);
C_ = std::get<1>(input_size);
H = std::get<2>(input_size);
W = std::get<3>(input_size);
K = B.size(0);
R = B.size(2);
S = B.size(3);
cutlass::conv::Conv2dProblemSize problem_size(
cutlass::Tensor4DCoord(N, H, W, C_),
cutlass::Tensor4DCoord(K, R, S, C_),
cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)),
cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)),
cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)),
cutlass::conv::Mode::kCrossCorrelation,
split_k_slices
);
typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename UnderlyingKernel::ElementC*>(C->data_ptr());
torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast);
at::Tensor D = torch::empty({N, C_, H, W}, options);
""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x
)
_PYTORCH_CONV2D_WGRAD_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_CONV2D_2x
+ """
at::Tensor ${name}_kernel(std::tuple<int, int, int, int> weight_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1}, float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
int N, H, W, C_, K, R, S;
K = std::get<0>(weight_size);
C_ = std::get<1>(weight_size);
R = std::get<2>(weight_size);
S = std::get<3>(weight_size);
N = B.size(0);
H = B.size(2);
W = B.size(3);
cutlass::conv::Conv2dProblemSize problem_size(
cutlass::Tensor4DCoord(N, H, W, C_),
cutlass::Tensor4DCoord(K, R, S, C_),
cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)),
cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)),
cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)),
cutlass::conv::Mode::kCrossCorrelation,
split_k_slices
);
typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename UnderlyingKernel::ElementC*>(C->data_ptr());
torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast);
at::Tensor D = torch::empty({K, C_, R, S}, options);
""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x
)
_PYTORCH_SETUP_PY = common._PYSTYLE_AUTOGEN_COMMENT + """
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='${name}',
ext_modules=[
CUDAExtension('${name}', [
'${name}.cpp',
'${name}_kernel.cu',
],
include_dirs=['${cutlass_path}/include', '${cutlass_path}/tools/util/include'],
extra_compile_args={
'cxx': ['-std=c++17'],
'nvcc': ['-std=c++17', ${extra_compile_args}],
},
libraries=['cuda']
),
],
cmdclass={
'build_ext': BuildExtension
})
"""
def _generate_setup(name: str, sourcedir: str, extra_compile_args: str=""):
"""
Generates a setup.py file for the extension
:param name: name of the module to generate
:type name: str
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:param extra_compile_args: additional compiler arguments to emit into the generated setup.py
:type extra_compile_args: str
"""
setup_py_file = os.path.join(sourcedir, "setup.py")
setup_source = SubstituteTemplate(
_PYTORCH_SETUP_PY, {"name": name, "cutlass_path": CUTLASS_PATH, "extra_compile_args": extra_compile_args}
)
with open(setup_py_file, "w") as outfile:
outfile.write(setup_source)
class _ArchListSetter:
"""
Utility context manager for temporarily setting the value of the ``TORCH_CUDA_ARCH_LIST``
environment variable when building a PyTorch CUDA module.
``TORCH_CUDA_ARCH_LIST`` is a space-delimited list of compute capabilities for which a PyTorch
CUDA module should be compiled.
For example, ``TORCH_CUDA_ARCH_LIST="7.0 8.0"`` would result in the inclusion of
``-gencode=arch=compute_70,code=sm_70`` and ``-gencode=arch=compute_80,code=sm_80`` in the
compilation of the module.
This utility wraps the building of a PyTorch CUDA module with a setting of this environment
variable according to the current compute capability being targeted.
Example usage:
.. highlight:: python
.. code-block:: python
# Temporarily set TORCH_CUDA_ARCH_LIST="8.0"
with _ArchListSetter(80):
# Perform JIT compilation and loading of the module
mod = torch.utils.cpp_extension.load(...)
:param cc: compute capability
:type cc: int
"""
_TORCH_CUDA_ARCH_LIST = "TORCH_CUDA_ARCH_LIST"
def __init__(self, cc: int):
self.cc_str = ".".join(list(str(cc)))
def __enter__(self):
"""
Saves the old value of TORCH_CUDA_ARCH_LIST and resets it to the new value based on ``cc``
"""
self.old_arch_list = os.getenv(_ArchListSetter._TORCH_CUDA_ARCH_LIST)
os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST] = self.cc_str
return self
def __exit__(self, exc_type, exc_val, traceback):
"""
Restores the old value of TORCH_CUDA_ARCH_LIST
"""
if self.old_arch_list is None:
del os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST]
else:
os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST] = self.old_arch_list
def _jit(name: str, cc: int, cpp_file: str, cuda_file: str):
"""
JIT compiles and loads a PyTorch CUDA extension.
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param cpp_file: path to file containing extension's C++ interface
:type cpp_file: str
:param cuda_file: path to file containing extension's CUDA interface
:type cuda_file: str
:return: loaded PyTorch module
"""
from torch.utils.cpp_extension import load
extra_cuda_cflags = ["-std=c++17"]
if cc == 90:
# PyTorch does not currently add the sm_90a target when compute capability
# 9.0 is set within TORCH_CUDA_ARCH_LIST. Thus, we manually add the sm_90a target.
extra_cuda_cflags.append("-gencode=arch=compute_90a,code=sm_90a")
with _ArchListSetter(cc):
jitmodule = load(
name,
[cpp_file, cuda_file],
extra_cuda_cflags=extra_cuda_cflags,
extra_include_paths=[
os.path.join(CUTLASS_PATH, "include"),
os.path.join(CUTLASS_PATH, "tools/util/include"),
],
extra_ldflags=["-lcuda"],
verbose=(logger.level == logging.DEBUG)
)
return jitmodule
def _pytorch_gemm(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS GEMM
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
"""
if sourcedir != "" and not os.path.isdir(sourcedir):
os.makedirs(sourcedir)
cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
extra_kw = {}
if op.api == ApiVersion.v3x:
impl_template = _PYTORCH_GEMM_IMPL_TEMPLATE_3x
else:
impl_template = _PYTORCH_GEMM_IMPL_TEMPLATE_2x
if op.swizzling_functor == swizzle.ThreadblockSwizzleStreamK:
extra_kw["args"] = common._CUTLASS_KERNEL_ARGS_2x_STREAM_K
else:
extra_kw["args"] = common._CUTLASS_KERNEL_ARGS_2x
cuda_impl = SubstituteTemplate(impl_template, {"name": name, **extra_kw})
cuda_source = SubstituteTemplate(
_PYTORCH_CUDA_TEMPLATE,
{
"includes": _PYTORCH_GEMM_INCLUDES[op.api],
"declaration": op.rt_module.emit(),
"procedural_name": op.procedural_name(),
"impl": cuda_impl,
"torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
},
)
with open(cuda_file, "w") as outfile:
outfile.write(cuda_source)
cpp_file = os.path.join(sourcedir, name + ".cpp")
cpp_source = SubstituteTemplate(
_PYTORCH_GEMM_CPP_TEMPLATE,
{"name": name, "description": f"CUTLASS {op.procedural_name()} GEMM"},
)
with open(cpp_file, "w") as outfile:
outfile.write(cpp_source)
extra_compile_args = ""
if cc == 90:
extra_compile_args = "'--generate-code=arch=compute_90a,code=[sm_90a]'"
_generate_setup(name, sourcedir, extra_compile_args)
if jit:
return _jit(name, cc, cpp_file, cuda_file)
return None
def _pytorch_grouped_gemm(
op, name: str, cc: int, jit: bool = False, sourcedir: str = ""
):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS grouped GEMM
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
"""
if op.api != ApiVersion.v2x:
raise Exception("Grouped GEMM is currently only supported for CUTLASS 2.x")
if sourcedir != "" and not os.path.isdir(sourcedir):
os.makedirs(sourcedir)
cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
cuda_impl = SubstituteTemplate(_PYTORCH_GROUPED_GEMM_IMPL_TEMPLATE, {"name": name})
cuda_source = SubstituteTemplate(
_PYTORCH_CUDA_TEMPLATE,
{
"includes": _PYTORCH_GROUPED_GEMM_INCLUDES,
"declaration": op.rt_module.emit(),
"procedural_name": op.procedural_name(),
"impl": cuda_impl,
"torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
},
)
with open(cuda_file, "w") as outfile:
outfile.write(cuda_source)
cpp_file = os.path.join(sourcedir, name + ".cpp")
cpp_source = SubstituteTemplate(
_PYTORCH_GROUPED_GEMM_CPP_TEMPLATE,
{"name": name, "description": f"CUTLASS {op.procedural_name()} grouped GEMM"},
)
with open(cpp_file, "w") as outfile:
outfile.write(cpp_source)
_generate_setup(name, sourcedir)
if jit:
return _jit(name, cc, cpp_file, cuda_file)
return None
def _pytorch_conv2d(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS Conv2d
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
Note that when the conv kind is `dgrad` or `wgrad`, the size of the input `(N, C, H, W)` or
weight `(K, C, R, S)` must be provided, because there are multiple valid solutions
for H/W/R/S given the same P/Q.
:return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
"""
if sourcedir != "" and not os.path.isdir(sourcedir):
os.makedirs(sourcedir)
cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
extra_kw = {}
if op.conv_kind == ConvKind.Fprop:
impl_template = _PYTORCH_CONV2D_FPROP_IMPL_TEMPLATE_2x
cpp_template = _PYTORCH_CONV2D_FPROP_CPP_TEMPLATE
elif op.conv_kind == ConvKind.Dgrad:
impl_template = _PYTORCH_CONV2D_DGRAD_IMPL_TEMPLATE_2x
cpp_template = _PYTORCH_CONV2D_GRAD_CPP_TEMPLATE
elif op.conv_kind == ConvKind.Wgrad:
impl_template = _PYTORCH_CONV2D_WGRAD_IMPL_TEMPLATE_2x
cpp_template = _PYTORCH_CONV2D_GRAD_CPP_TEMPLATE
extra_kw["conv_kind_name"] = ConvKindNames[op.conv_kind].capitalize()
extra_kw["torch_type_C"] = _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element]
cuda_impl = SubstituteTemplate(impl_template, {"name": name, **extra_kw})
cuda_source = SubstituteTemplate(
_PYTORCH_CUDA_TEMPLATE,
{
"includes": _PYTORCH_CONV2D_INCLUDES,
"declaration": op.rt_module.emit(),
"procedural_name": op.procedural_name(),
"impl": cuda_impl,
"torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
},
)
with open(cuda_file, "w") as outfile:
outfile.write(cuda_source)
cpp_file = os.path.join(sourcedir, name + ".cpp")
cpp_source = SubstituteTemplate(
cpp_template,
{"name": name, "description": f"CUTLASS {op.procedural_name()} Conv2d"},
)
with open(cpp_file, "w") as outfile:
outfile.write(cpp_source)
_generate_setup(name, sourcedir)
if jit:
return _jit(name, cc, cpp_file, cuda_file)
return None
def pytorch(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS kernel
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
The result of this method is files within ``sourcedir`` that can be used for building
a PyTorch module.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:return: loaded PyTorch module (if ``jit=True``) or None
"""
device_op = op.device_op()
if isinstance(op, GemmOperationUniversal):
return _pytorch_gemm(device_op, name, cc, jit, sourcedir)
elif isinstance(op, GemmOperationGrouped):
return _pytorch_grouped_gemm(device_op, name, cc, jit, sourcedir)
elif isinstance(op, Conv2dOperation):
return _pytorch_conv2d(device_op, name, cc, jit, sourcedir)
else:
raise Exception(
f"Operation type {type(op)} is not currently supported for PyTorch emission."
)
| python/cutlass/emit/pytorch.py/0 | {
"file_path": "python/cutlass/emit/pytorch.py",
"repo_id": "python",
"token_count": 16302
} | 48 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import os
import sys
from . import conv2d_operation
from . import conv3d_operation
from . import gemm_operation
if '-m' not in sys.argv:
# Do not import generator when running python -m cutlass_library.generator to
# avoid double-import warnings
from . import generator
from . import library
from . import manifest
from . import rank_2k_operation
from . import rank_k_operation
from . import symm_operation
from . import trmm_operation
# Make enum types from library.py accessible via cutlass_library.*
from .library import *
# Set up `source` to point to the path containing the CUTLASS source.
# Check first if the path contains a `source` subdirectory -- this will
# be the case when the package has been installed via pip. Otherwise,
# default to the root of CUTLASS.
install_source_path = os.path.join(__path__[0], 'source')
if os.path.isdir(install_source_path):
source_path = install_source_path
else:
source_path = os.path.join(__path__[0], '../..')
| python/cutlass_library/__init__.py/0 | {
"file_path": "python/cutlass_library/__init__.py",
"repo_id": "python",
"token_count": 757
} | 49 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Methods for layout swizzling
"""
from .layout import *
def shiftr(a, s):
return a >> s if s > 0 else shiftl(a, -s)
def shiftl(a, s):
return a << s if s > 0 else shiftr(a, -s)
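# Quick sanity sketch of the helpers above (values chosen for illustration):
# a negative shift amount simply delegates to the opposite direction.
def _shift_examples():
    assert shiftr(0b1000, 2) == 0b0010
    assert shiftl(0b0010, 2) == 0b1000
    assert shiftr(0b0010, -2) == 0b1000  # negative right-shift becomes a left-shift
    return True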
## A generic Swizzle functor
# 0bxxxxxxxxxxxxxxxYYYxxxxxxxZZZxxxx
# ^--^ Base is the number of least-sig bits to keep constant
# ^-^ ^-^ Bits is the number of bits in the mask
# ^---------^ Shift is the distance to shift the YYY mask
# (pos shifts YYY to the right, neg shifts YYY to the left)
#
# e.g. Given
# 0bxxxxxxxxxxxxxxxxYYxxxxxxxxxZZxxx
# the result is
# 0bxxxxxxxxxxxxxxxxYYxxxxxxxxxAAxxx where AA = ZZ xor YY
#
class Swizzle:
def __init__(self, bits, base, shift):
assert bits >= 0
assert base >= 0
assert abs(shift) >= bits
self.bits = bits
self.base = base
self.shift = shift
bit_msk = (1 << bits) - 1
self.yyy_msk = bit_msk << (base + max(0,shift))
self.zzz_msk = bit_msk << (base - min(0,shift))
# operator () (transform integer)
def __call__(self, offset):
return offset ^ shiftr(offset & self.yyy_msk, self.shift)
# Size of the domain
def size(self):
return 1 << (self.bits + self.base + abs(self.shift))
# Size of the codomain
def cosize(self):
return self.size()
# print and str
def __str__(self):
return f"SW_{self.bits}_{self.base}_{self.shift}"
# error msgs and representation
def __repr__(self):
return f"Swizzle({self.bits},{self.base},{self.shift})"
class ComposedLayout(LayoutBase):
def __init__(self, layoutB, offset, layoutA):
self.layoutB = layoutB
self.offset = offset
self.layoutA = layoutA
# operator ==
def __eq__(self, other):
return self.layoutB == other.layoutB and self.offset == other.offset and self.layoutA == other.layoutA
# operator len(L) (len [rank] like tuples)
def __len__(self):
return len(self.layoutA)
# operator () (map coord to idx)
def __call__(self, *args):
return self.layoutB(self.offset + self.layoutA(*args))
# operator [] (get-i like tuples)
def __getitem__(self, i):
return ComposedLayout(self.layoutB, self.offset, self.layoutA[i])
# size(layout) Size of the domain
def size(self):
return size(self.layoutA)
# cosize(layout) Size of the codomain
def cosize(self):
return cosize(self.layoutB)
# print and str
def __str__(self):
return f"{self.layoutB} o {self.offset} o {self.layoutA}"
# error msgs and representation
def __repr__(self):
return f"ComposedLayout({repr(self.layoutB)},{repr(self.offset)},{repr(self.layoutA)})"
| python/pycute/swizzle.py/0 | {
"file_path": "python/pycute/swizzle.py",
"repo_id": "python",
"token_count": 1533
} | 50 |
################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Unit test for store nodes in SM90
"""
import logging
import unittest
import cutlass
from cutlass.backend import *
from cutlass.epilogue import *
from utils.evt_testbed import EVTTestBed, EVTTestCaseBase
cutlass.set_log_level(logging.WARNING)
@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTStore(EVTTestCaseBase):
def test_aux_store(self):
"""
Returning a tensor with shape [m, n]
"""
def evt_aux_store(accum, alpha, C):
F = alpha * accum
D = F + C
return D, F
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 0.5,
"C": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_aux_store, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_col_reduce(self):
"""
Reduction [m, n] -> [m, 1]
"""
def evt_row_reduce(accum, alpha, C):
acc_row_max = max(accum, dim=[2,])
F = alpha * accum
F_row_max = max(F, dim=[0, 2])
D = F + C
return D, F_row_max, acc_row_max
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 2.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"F_row_max": self.fake_tensor(np.float32, (m, 1)),
"acc_row_max": self.fake_tensor(np.float32, (l, m, 1)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_row_reduce, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F_row_max", "acc_row_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_row_reduce(self):
"""
Reduction [m, n] -> [n]
"""
def evt_col_reduce(accum, alpha, C):
acc_col_max = max(accum, dim=[1,])
F = alpha * accum
F_col_max = max(F, dim=[0, 1])
D = F + C
return D, F_col_max, acc_col_max
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 2.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"F_col_max": self.fake_tensor(np.float32, (n,)),
"acc_col_max": self.fake_tensor(np.float32, (l, 1, n)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_col_reduce, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F_col_max", "acc_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_scalar_reduce(self):
"""
Reduction [m, n] -> [1,]
"""
def evt_scalar_reduce(accum, alpha, C):
acc_max = max(accum, dim=[1, 2])
F = alpha * accum
F_max = max(F, dim=[0, 1, 2])
D = F + C
return D, F_max, acc_max
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 2.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"acc_max": self.fake_tensor(np.float32, (l, 1, 1)),
"F_max": self.fake_tensor(np.float32, (1,)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_scalar_reduce, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F_max", "acc_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/evt/evt_store_sm80_90.py/0 | {
"file_path": "test/python/cutlass/evt/evt_store_sm80_90.py",
"repo_id": "test",
"token_count": 2849
} | 51 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#pragma warning (disable : 4068 ) /* disable unknown pragma warnings for visual studio */
#pragma nv_diag_suppress boolean_controlling_expr_is_constant
#include <gtest/gtest.h>
#pragma nv_diag_warning boolean_controlling_expr_is_constant
#pragma warning( disable : 4503)
#include <cstdlib>
#include <string>
#include <cuda_runtime_api.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Gets the properties of the active CUDA device
cudaDeviceProp GetCudaDevice();
/// Prints device properties
std::ostream &operator<<(std::ostream &out, cudaDeviceProp const &device);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Sets test filter flags for the unit tests based on the detected device architecture
void FilterArchitecture();
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Reads environment variable `CUTLASS_UNIT_TEST_PROBLEM_COUNT` to control the number and order
/// of problem sizes run by CUTLASS unit tests
int CutlassUnitTestProblemCount();
/////////////////////////////////////////////////////////////////////////////////////////////////
// active test macro
#define CUTLASS_TEST_LEVEL_ACTIVE(LEVEL,NAME_STATIC,NAME_DYNAMIC,...) \
TEST(NAME_STATIC,L##LEVEL##_##NAME_DYNAMIC) __VA_ARGS__
// disabled test macro
#define CUTLASS_TEST_LEVEL_DISABLED(LEVEL,NAME_STATIC,NAME_DYNAMIC,...) \
TEST(NAME_STATIC,DISABLED_L##LEVEL##_##NAME_DYNAMIC) {}
#if CUTLASS_TEST_LEVEL == 0
#define CUTLASS_TEST_L0(NAME_STATIC,NAME_DYNAMIC,...) CUTLASS_TEST_LEVEL_ACTIVE(0,NAME_STATIC,NAME_DYNAMIC,__VA_ARGS__)
#define CUTLASS_TEST_L1(NAME_STATIC,NAME_DYNAMIC,...) CUTLASS_TEST_LEVEL_DISABLED(1,NAME_STATIC,NAME_DYNAMIC,__VA_ARGS__)
#define CUTLASS_TEST_L2(NAME_STATIC,NAME_DYNAMIC,...) CUTLASS_TEST_LEVEL_DISABLED(2,NAME_STATIC,NAME_DYNAMIC,__VA_ARGS__)
#elif CUTLASS_TEST_LEVEL == 1
#define CUTLASS_TEST_L0(NAME_STATIC,NAME_DYNAMIC,...) CUTLASS_TEST_LEVEL_ACTIVE(0,NAME_STATIC,NAME_DYNAMIC,__VA_ARGS__)
#define CUTLASS_TEST_L1(NAME_STATIC,NAME_DYNAMIC,...) CUTLASS_TEST_LEVEL_ACTIVE(1,NAME_STATIC,NAME_DYNAMIC,__VA_ARGS__)
#define CUTLASS_TEST_L2(NAME_STATIC,NAME_DYNAMIC,...) CUTLASS_TEST_LEVEL_DISABLED(2,NAME_STATIC,NAME_DYNAMIC,__VA_ARGS__)
#else
#define CUTLASS_TEST_L0(NAME_STATIC,NAME_DYNAMIC,...) CUTLASS_TEST_LEVEL_ACTIVE(0,NAME_STATIC,NAME_DYNAMIC,__VA_ARGS__)
#define CUTLASS_TEST_L1(NAME_STATIC,NAME_DYNAMIC,...) CUTLASS_TEST_LEVEL_ACTIVE(1,NAME_STATIC,NAME_DYNAMIC,__VA_ARGS__)
#define CUTLASS_TEST_L2(NAME_STATIC,NAME_DYNAMIC,...) CUTLASS_TEST_LEVEL_ACTIVE(2,NAME_STATIC,NAME_DYNAMIC,__VA_ARGS__)
#endif
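// Usage sketch (illustrative test names): CUTLASS_TEST_L1(MyKernel, 128x128x32, { /* body */ })
// expands to TEST(MyKernel, L1_128x128x32) { /* body */ } when CUTLASS_TEST_LEVEL >= 1, and to an
// empty TEST(MyKernel, DISABLED_L1_128x128x32) {} otherwise.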
#if !defined(CUTLASS_TEST_UNIT_ENABLE_WARNINGS)
#define CUTLASS_TEST_UNIT_ENABLE_WARNINGS false
#endif
#if (__CUDACC_VER_MAJOR__ >= 12)
#define CUDA_12_0_SM90_FEATURES_SUPPORTED true
#else
#define CUDA_12_0_SM90_FEATURES_SUPPORTED false
#endif
#include <cutlass/cutlass.h>
#include <cutlass/numeric_types.h>
#include <cutlass/trace.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/common/cutlass_unit_test.h/0 | {
"file_path": "test/unit/common/cutlass_unit_test.h",
"repo_id": "test",
"token_count": 1640
} | 52 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types
and is safe to use in a union.
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/array.h"
#include "cutlass/util/device_memory.h"
#pragma warning( disable : 4800)
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace core {
/// Each thread clears its array and writes to global memory. No PRMT instructions should
/// be generated if the size of Array<T, N> is a multiple of 32 bits.
template <typename T, int N>
__global__ void test_array_clear(cutlass::Array<T, N> *ptr) {
cutlass::Array<T, N> storage;
storage.clear();
ptr[threadIdx.x] = storage;
}
/// Each thread writes its thread index into the elements of its array and then writes the result
/// to global memory.
template <typename T, int N>
__global__ void test_array_threadid(cutlass::Array<T, N> *ptr) {
cutlass::Array<T, N> storage;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
storage.at(i) = T(int(threadIdx.x));
}
ptr[threadIdx.x] = storage;
}
/// Each thread fills its array with the sequence 0, 1, ..., N-1 and then writes the result
/// to global memory.
template <typename T, int N>
__global__ void test_array_sequence(cutlass::Array<T, N> *ptr) {
cutlass::Array<T, N> storage;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
storage.at(i) = T(i);
}
ptr[threadIdx.x] = storage;
}
} // namespace core
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, int N>
class TestArray {
public:
//
// Data members
//
/// Number of threads
int const kThreads = 32;
typedef cutlass::Array<T, N> ArrayTy;
//
// Methods
//
/// Ctor
TestArray() {
}
/// Runs the test
void run() {
/// Device memory containing output
cutlass::device_memory::allocation< ArrayTy > output(static_cast<size_t>(kThreads));
std::vector< ArrayTy > output_host(static_cast<size_t>(kThreads));
dim3 grid(1,1);
dim3 block(kThreads, 1, 1);
test::core::test_array_clear<<< grid, block >>>(output.get());
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result);
//
// Verify contains all zeros
//
cutlass::device_memory::copy_to_host(output_host.data(), output.get(), kThreads);
result = cudaGetLastError();
ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result);
char const *ptr_host = reinterpret_cast<char const *>(output_host.data());
for (size_t i = 0; i < sizeof(ArrayTy) * kThreads; ++i) {
EXPECT_FALSE(ptr_host[i]);
}
//
// Verify each element contains the low bits of the thread Id
//
test::core::test_array_threadid<<< grid, block >>>(output.get());
result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result);
cutlass::device_memory::copy_to_host(output_host.data(), output.get(), kThreads);
result = cudaGetLastError();
ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result);
for (int i = 0; i < kThreads; ++i) {
T tid = T(i);
ArrayTy thread = output_host.at(i);
// Element-wise access
for (int j = 0; j < N; ++j) {
EXPECT_TRUE(tid == thread[j]);
}
// Iterator access
for (auto it = thread.begin(); it != thread.end(); ++it) {
EXPECT_TRUE(tid == *it);
}
// Range-based for
for (auto const & x : thread) {
EXPECT_TRUE(tid == x);
}
}
//
// Verify each element contains the sequence 0, 1, ..., N-1
//
test::core::test_array_sequence<<< grid, block >>>(output.get());
result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result);
cutlass::device_memory::copy_to_host(output_host.data(), output.get(), kThreads);
result = cudaGetLastError();
ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result);
for (int i = 0; i < kThreads; ++i) {
ArrayTy thread = output_host.at(i);
// Element-wise access
for (int j = 0; j < N; ++j) {
T got = T(j);
EXPECT_TRUE(got == thread[j]);
}
// Iterator access
int j = 0;
for (auto it = thread.begin(); it != thread.end(); ++it, ++j) {
T got = T(j);
EXPECT_TRUE(got == *it);
}
// Range-based for
j = 0;
for (auto const & x : thread) {
T got = T(j);
EXPECT_TRUE(got == x);
++j;
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Array, Int8x16) {
TestArray<int8_t, 16>().run();
}
TEST(Array, Int32x4) {
TestArray<int, 4>().run();
}
#if __CUDA_ARCH__ >= 520
TEST(Array, Float16x8) {
TestArray<cutlass::half_t, 8>().run();
}
#endif
TEST(Array, FloatBF16x8) {
TestArray<cutlass::bfloat16_t, 8>().run();
}
TEST(Array, FloatTF32x4) {
TestArray<cutlass::tfloat32_t, 4>().run();
}
TEST(Array, Float32x4) {
TestArray<float, 4>().run();
}
TEST(Array, Int4x32) {
TestArray<cutlass::int4b_t, 32>().run();
}
TEST(Array, Uint4x32) {
TestArray<cutlass::uint4b_t, 32>().run();
}
TEST(Array, Bin1x128) {
TestArray<cutlass::bin1_t, 128>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/core/array.cu/0 | {
"file_path": "test/unit/core/array.cu",
"repo_id": "test",
"token_count": 2606
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for basic uint128 functionality
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Host
//
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(uint128_t, host_arithmetic) {
using T = cutlass::uint128_t;
  // Sums confined to the low 64 bits
for (uint64_t i = 0; i < 1024; ++i) {
for (uint64_t j = 0; j < 1024; ++j) {
T x = i;
T y = j;
EXPECT_TRUE(static_cast<uint64_t>(x + y) == (i + j));
}
}
  // Carry out of the low 64 bits into the high 64 bits
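  // (2^64 - 1) + (i + 1) = 2^64 + i, so the expected result has hi == 1 and lo == i.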
{
for (uint64_t i = 0; i < 1024; ++i) {
T x = static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF);
T y = i + 1;
T z = x + y;
EXPECT_EQ(z.hilo_.hi, static_cast<uint64_t>(0x1));
EXPECT_EQ(z.hilo_.lo, i);
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Device
//
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void uint128_add_operator(cutlass::uint128_t *output, cutlass::uint128_t const *input, cutlass::uint128_t base, int N) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
output[tid] = input[tid] + base;
}
}
TEST(uint128_t, device_arithmetic) {
using T = cutlass::uint128_t;
int const N = 1024;
cutlass::HostTensor<T, cutlass::layout::RowMajor> input({N, 1});
cutlass::HostTensor<T, cutlass::layout::RowMajor> sum({N, 1});
for (int i = 0; i < N; ++i) {
input.at({i, 0}) = static_cast<uint64_t>(i + 1);
}
T b = static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF);
input.sync_device();
uint128_add_operator<<< dim3(1,1), dim3(N, 1) >>>(sum.device_data(), input.device_data(), b, N);
ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error.";
sum.sync_host();
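  // Each element should be (2^64 - 1) + (i + 1) = 2^64 + i, i.e. hi == 1 and lo == i.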
for (int i = 0; i < N; ++i) {
T got = sum.at({i, 0});
uint64_t expected_hi = static_cast<uint64_t>(0x1);
uint64_t expected_lo = static_cast<uint64_t>(i);
EXPECT_EQ(got.hilo_.hi, expected_hi);
EXPECT_EQ(got.hilo_.lo, expected_lo);
}
}
| test/unit/core/uint128.cu/0 | {
"file_path": "test/unit/core/uint128.cu",
"repo_id": "test",
"token_count": 1371
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <iostream>
#include <cute/tensor.hpp>
using namespace cute;
template <class LayoutA, class LayoutB>
void
test_composition(LayoutA const& layoutA,
LayoutB const& layoutB)
{
auto layoutR = composition(layoutA, layoutB);
CUTLASS_TRACE_HOST("test_composition()");
CUTLASS_TRACE_HOST(layoutA << " o " << layoutB);
CUTLASS_TRACE_HOST(" => ");
CUTLASS_TRACE_HOST(layoutR);
// Test that layout B is compatible with layout R
EXPECT_TRUE(compatible(layoutB, layoutR));
// Test that R(c) = A(B(c)) for all coordinates c in layoutB
for (int c = 0; c < size(layoutB); ++c) {
EXPECT_EQ(layoutR(c), layoutA(layoutB(c)));
}
}
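// Worked example (illustrative): composing A = 4:2 (shape 4, stride 2) with B = 2:2 yields the
// layout R = 2:4, since R(c) = A(B(c)), e.g. R(1) = A(B(1)) = A(2) = 4.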
TEST(CuTe_core, Composition)
{
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("COMPOSITION" );
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("Simple tests" );
CUTLASS_TRACE_HOST("-------------------------------");
{
auto a = Layout<_1,_0>{};
auto b = Layout<_1,_0>{};
test_composition(a, b);
}
{
auto a = Layout<_1,_0>{};
auto b = Layout<_1,_1>{};
test_composition(a, b);
}
{
auto a = Layout<_1,_1>{};
auto b = Layout<_1,_0>{};
test_composition(a, b);
}
{
auto a = Layout<_1,_1>{};
auto b = Layout<_1,_1>{};
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{});
auto b = make_layout(Shape<_4>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{}, Stride<_2>{});
auto b = make_layout(Shape<_4>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{}, Stride<_0>{});
auto b = make_layout(Shape<_4>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{});
auto b = make_layout(Shape<_4>{}, Stride<_0>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{});
auto b = make_layout(Shape<_1>{}, Stride<_0>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{});
auto b = make_layout(Shape<_2>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{}, Stride<_2>{});
auto b = make_layout(Shape<_2>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{});
auto b = make_layout(Shape<_2>{}, Stride<_2>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{}, Stride<_2>{});
auto b = make_layout(Shape<_2>{}, Stride<_2>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_3>{});
auto b = make_layout(Shape<_12>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_12>{});
auto b = make_layout(Shape<_4,_3>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_12>{}, Stride<_2>{});
auto b = make_layout(Shape<_4,_3>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_12>{});
auto b = make_layout(Shape<_4,_3>{}, Stride<_3,_1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_12>{}, Stride<_2>{});
auto b = make_layout(Shape<_4,_3>{}, Stride<_3,_1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_12>{});
auto b = make_layout(Shape<_2,_3>{}, Stride<_2,_4>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_3>{});
auto b = make_layout(Shape<_4,_3>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_3>{});
auto b = make_layout(Shape<_6>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_3>{});
auto b = make_layout(Shape<_6>{}, Stride<_2>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_3>{});
auto b = make_layout(Shape<_6,_2>{}, Stride<_2,_1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_3>{});
auto b = make_layout(Shape<_4,_3>{}, Stride<_3,_1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_3>{}, Stride<_3,_1>{});
auto b = make_layout(Shape<_4,_3>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_3>{}, Stride<_3,_1>{});
auto b = make_layout(Shape<_12>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_3>{}, Stride<_3,_1>{});
auto b = make_layout(Shape<_6>{}, Stride<_2>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_3>{}, Stride<_3,_1>{});
auto b = make_layout(Shape<_6,_2>{}, Stride<_2,_1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_8,_8>{});
auto b = make_layout(Shape<Shape<_2, _2,_2>, Shape<_2,_2, _2>>{},
Stride<Stride<_1,_16,_4>, Stride<_8,_2,_32>>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_8,_8>{}, Stride<_8,_1>{});
auto b = make_layout(Shape<Shape<_2, _2,_2>, Shape<_2,_2, _2>>{},
Stride<Stride<_1,_16,_4>, Stride<_8,_2,_32>>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<Shape<_4,_2>>{}, Stride<Stride<_1,_16>>{});
auto b = make_layout(Shape<_4,_2>{}, Stride<_2,_1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_2,_2>{}, Stride<_2,_1>{});
auto b = make_layout(Shape<_2,_2>{}, Stride<_2,_1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_8,_2>{});
auto b = make_layout(Shape<_2,_2,_2>{}, Stride<_2,_8,_1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_8,_2>{}, Stride<_2,_8,_1>{});
auto b = make_layout(Shape<_2,_2,_2>{}, Stride<_1,_8,_2>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_8,_2>{}, Stride<_2,_8,_1>{});
auto b = make_layout(Shape<_4,_2,_2>{}, Stride<_2,_8,_1>{});
test_composition(a, b);
}
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("Dynamic shapes/strides" );
CUTLASS_TRACE_HOST("-------------------------------");
{
auto a = make_layout(12, 1);
auto b = make_layout(_4{}, _1{});
test_composition(a, b);
}
{
auto a = make_layout(12, 1);
auto b = make_layout(_4{}, 1);
test_composition(a, b);
}
{
auto a = make_layout(12, _1{});
auto b = make_layout(_4{}, 1);
test_composition(a, b);
}
{
auto a = make_layout(12, _1{});
auto b = make_layout(_4{}, _1{});
test_composition(a, b);
}
{
auto a = make_layout(make_shape(12,3), make_stride(1,24));
auto b = make_layout(Shape<_4>{}, Stride<_1>{});
test_composition(a, b);
}
{
auto a = make_layout(16, 2);
auto b = make_layout(4, 2);
test_composition(a, b);
}
{
auto a = make_layout(make_shape(128,24,5), make_stride(1,128,3072));
auto b = make_layout(64, 2);
test_composition(a, b);
}
{
auto a = make_layout(make_shape(128,24,5), make_stride(1,128,3072));
auto b = make_layout(480, Int<32>{});
test_composition(a, b);
}
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("cosize(b) > size(a) and divisibility");
CUTLASS_TRACE_HOST("-------------------------------");
{
auto a = make_layout(Shape<_1>{}, Stride<_0>{});
auto b = make_layout(Shape<_4>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_1>{}, Stride<_1>{});
auto b = make_layout(Shape<_4>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{});
auto b = make_layout(Shape<_4>{}, Stride<_2>{});
test_composition(a, b);
}
// Last mode gets extended
{
auto a = make_layout(Shape<_4,_3>{}, Stride<_3,_1>{});
auto b = make_layout(Shape<_24>{});
test_composition(a, b);
}
// Last mode extension even without last mode divisibility
{
auto a = make_layout(Shape<_4,_3>{}, Stride<_3,_1>{});
auto b = make_layout(Shape<_8>{});
test_composition(a, b);
}
// Capping a Layout with 1:0 forces divisibility and extends in stride-0
{
auto a = make_layout(Shape<_4,_3,_1>{}, Stride<_3,_1,_0>{});
auto b = make_layout(Shape<_24>{});
test_composition(a, b);
}
{
auto a = make_layout(3, _1{});
auto b = make_layout(_4{}, _1{});
test_composition(a, b);
}
{
auto a = make_layout(make_shape(48,24,5), make_stride(_1{},128,3072));
auto b = make_layout(32, Int<1>{});
test_composition(a, b);
}
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("Swizzle composition" );
CUTLASS_TRACE_HOST("-------------------------------");
{
auto a = Layout<Shape<_8,_8>, Stride<_8,_1>>{};
auto b = composition(Swizzle<2,0,-3>{}, Layout<Shape<_8,_8>, Stride<_8,_1>>{});
test_composition(a, b);
}
{
auto a = composition(Swizzle<2,0, 3>{}, Layout<Shape<_8,_8>, Stride<_8,_1>>{});
auto b = composition(Swizzle<2,0,-3>{}, Layout<Shape<_8,_8>, Stride<_8,_1>>{});
test_composition(a, b);
}
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("BETA: Negative strides" );
CUTLASS_TRACE_HOST("-------------------------------");
{
auto a = make_layout(Shape<_4>{}, Stride<_m1>{});
auto b = make_layout(Shape<_4>{}, Stride<_1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{}, Stride<_1>{});
auto b = make_layout(Shape<_4>{}, Stride<_m1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{}, Stride<_m1>{});
auto b = make_layout(Shape<_4>{}, Stride<_m1>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4>{}, Stride<_1>{});
auto b = make_layout(Shape<_4>{}, Stride<_m2>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_4>{}, Stride<_m1,_1>{});
auto b = make_layout(Shape<_2,_4,_2>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,_4>{}, Stride<_m1,_1>{});
auto b = make_layout(Shape<_2,_4,_2>{}, Stride<_1,_4,_2>{});
test_composition(a, b);
}
// The SM80 fp64 MMA NT problem
{
auto a = make_layout(Shape<_1,Shape<_2,_4>>{}, Stride<_0,Stride<_m1,_512>>{});
auto b = make_layout(_2{}, _m1{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_1,Shape<_2,_4>>{}, Stride<_0,Stride<_m1,_512>>{});
auto b = make_layout(_4{}, _m1{});
test_composition(a, b);
}
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("BETA: Tuple strides" );
CUTLASS_TRACE_HOST("-------------------------------");
{
auto a = make_layout(Shape<_4,_4>{}, Stride<_4,_1>{});
auto b = make_layout(Shape<_4,_4>{}, Stride<E<1>,E<0>>{});
test_composition(a, b);
}
{
auto a = make_layout(Shape<_4,Shape<_2,_3>>{}, Stride<_6,Stride<_3,_1>>{});
auto b = make_layout(Shape<_2,_4>{}, Stride<E<1,1>,E<0>>{});
test_composition(a, b);
}
}
| test/unit/cute/core/composition.cpp/0 | {
"file_path": "test/unit/cute/core/composition.cpp",
"repo_id": "test",
"token_count": 5753
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <numeric>
#include <tuple>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/numeric/numeric_types.hpp>
using namespace cute;
namespace cooperative_copy_mode {
struct global_shared {};
struct global_global {};
struct shared_shared {};
}
// gs --> global to/from shared
template <int MaxVecBits, class GMemLayout, class SMemLayout, uint32_t ThreadBlockSize, class T>
__device__ void
cooperative_copy_default_gs(T const* g_in, T* g_out)
{
using namespace cute;
extern __shared__ float4 smem_buf[];
  // Cast smem_buf to a byte pointer and offset it by MaxVecBits/8 bytes so the test exercises a
  // pointer that is guaranteed to be aligned only to MaxVecBits bits.
uint8_t* smem_uint8_ptr = reinterpret_cast<uint8_t*>(smem_buf) + (MaxVecBits/8);
T* smem = reinterpret_cast<T*>(smem_uint8_ptr);
Tensor g_in_tensor = make_tensor(make_gmem_ptr(g_in), GMemLayout{});
Tensor g_out_tensor = make_tensor(make_gmem_ptr(g_out), GMemLayout{});
Tensor s_tensor = make_tensor(make_smem_ptr(smem), SMemLayout{});
cooperative_copy<ThreadBlockSize, MaxVecBits>(threadIdx.x, g_in_tensor, s_tensor);
__syncthreads();
if(thread0()) {
for(int i = 0; i < size(s_tensor); ++i) {
s_tensor(i) += T(i);
}
}
__syncthreads();
cooperative_copy<ThreadBlockSize, MaxVecBits>(threadIdx.x, s_tensor, g_out_tensor);
}
// ss --> shared to shared
template <int MaxVecBits, class Layout1, class Layout2, uint32_t ThreadBlockSize, class T>
__device__ void
cooperative_copy_default_ss(T const* g_in, T* g_out)
{
using namespace cute;
extern __shared__ float4 smem_buf[];
  // Offset the second shared memory buffer by MaxVecBits/8 bytes so the test exercises a
  // pointer that is guaranteed to be aligned only to MaxVecBits bits.
T* smem1 = reinterpret_cast<T*>(smem_buf);
uint8_t* smem2_uint8_ptr = reinterpret_cast<uint8_t*>(smem_buf) + (MaxVecBits/8);
T* smem2 = reinterpret_cast<T*>(smem2_uint8_ptr) + cute::cosize(Layout2{});
Tensor g_in_tensor = make_tensor(make_gmem_ptr(g_in), Layout1 {});
Tensor g_out_tensor = make_tensor(make_gmem_ptr(g_out), Layout2 {});
Tensor s1_tensor = make_tensor(make_smem_ptr(smem1), Layout2 {});
Tensor s2_tensor = make_tensor(make_smem_ptr(smem2), Layout1 {});
cooperative_copy<ThreadBlockSize, cute::sizeof_bits_v<T>>(threadIdx.x, g_in_tensor, s1_tensor);
__syncthreads();
if(thread0()) {
for(int i = 0; i < size(s1_tensor); ++i) {
s1_tensor(i) += T(i);
}
}
__syncthreads();
cooperative_copy<ThreadBlockSize, MaxVecBits>(threadIdx.x, s1_tensor, s2_tensor);
__syncthreads();
cooperative_copy<ThreadBlockSize, cute::sizeof_bits_v<T>>(threadIdx.x, s2_tensor, g_out_tensor);
}
// gg --> global to global
template <int MaxVecBits, class Layout1, class Layout2, uint32_t ThreadBlockSize, class T>
__device__ void
cooperative_copy_default_gg(T const* g_in, T* g_out)
{
using namespace cute;
Tensor g_in_tensor = make_tensor(make_gmem_ptr(g_in), Layout1{});
Tensor g_out_tensor = make_tensor(make_gmem_ptr(g_out), Layout2{});
cooperative_copy<ThreadBlockSize, MaxVecBits>(threadIdx.x, g_in_tensor, g_out_tensor);
}
template <class Mode, int MaxVecBits, class Layout1, class Layout2, uint32_t ThreadBlockSize, class T>
__global__ void
cooperative_copy_default_kernel(T const* g_in, T* g_out)
{
if constexpr(std::is_same_v<Mode, cooperative_copy_mode::global_shared>) {
cooperative_copy_default_gs<MaxVecBits, Layout1, Layout2, ThreadBlockSize>(g_in, g_out);
} else if constexpr (std::is_same_v<Mode, cooperative_copy_mode::global_global>) {
cooperative_copy_default_gg<MaxVecBits, Layout1, Layout2, ThreadBlockSize>(g_in, g_out);
} else if constexpr (std::is_same_v<Mode, cooperative_copy_mode::shared_shared>) {
cooperative_copy_default_ss<MaxVecBits, Layout1, Layout2, ThreadBlockSize>(g_in, g_out);
}
}
// Mode - defines memory types of src and dst in cooperative_copy operation
// MaxVecBits - maximum vectorization width (in bits) used by the cooperative_copy operation; the test
//              also enforces this alignment on the pointers it passes in so the vectorized path is exercised
template <class Mode, int MaxVecBits, class Layout1, class Layout2, uint32_t ThreadBlockSize, class T>
void test_cooperative_copy_default()
{
using value_type = T;
static_assert(cute::size(Layout1{}) == cute::size(Layout2{}));
using gmem_layout_in = Layout1;
using gmem_layout_out = std::conditional_t<std::is_same_v<Mode, cooperative_copy_mode::global_shared>, Layout1, Layout2>;
#if 0
print(" "); print("layout1: "); print(Layout1{}); print("\n");
print(" "); print("layout2: "); print(Layout2{}); print("\n");
print(" "); print("threads: "); print(ThreadBlockSize); print("\n");
#endif
if constexpr (MaxVecBits < cute::sizeof_bits_v<value_type>) {
GTEST_SKIP() << "Skipping test since MaxVecBits (=" << MaxVecBits
<< ") < cute::sizeof_bits_v<value_type> (=" << cute::sizeof_bits_v<value_type> << ")";
} else {
constexpr auto max_vec_bytes = MaxVecBits / 8;
static_assert((max_vec_bytes % sizeof(T)) == 0);
constexpr uint32_t count = cute::cosize(gmem_layout_in {});
// Extra elements to force MaxVecBits alignment in global memory
constexpr uint32_t extra_elements = max_vec_bytes / sizeof(value_type);
// Allocate
thrust::host_vector<value_type> h_in(count + extra_elements);
thrust::host_vector<value_type> h_out(count + extra_elements);
// Initialize
Tensor h_in_tensor = make_tensor((h_in.data() + extra_elements), gmem_layout_in {});
Tensor h_out_tensor = make_tensor((h_out.data() + extra_elements), gmem_layout_out {});
for (int i = 0; i < cute::size(h_in_tensor); ++i) {
h_in_tensor(i) = value_type(float(i));
      // For the global-to-global copy the kernel does not modify the data, so the expected output equals the input
h_out_tensor(i) = std::is_same_v<Mode, cooperative_copy_mode::global_global> ? value_type(float(i)) : value_type(float(2 * i));
}
// To GPU
thrust::device_vector<value_type> d_in = h_in;
thrust::device_vector<value_type> d_out(d_in.size(), value_type(float(-2)));
    // Reserve (MaxVecBits/8) extra bytes of shared memory, since the kernel offsets its pointer by
    // that many bytes to enforce alignment to exactly (MaxVecBits/8) bytes
size_t shared_memory_bytes = (sizeof(value_type) * count) + max_vec_bytes;
shared_memory_bytes += std::is_same_v<Mode, cooperative_copy_mode::shared_shared> * (sizeof(value_type) * count);
// Launch
auto coop_copy = cooperative_copy_default_kernel<Mode, MaxVecBits, Layout1, Layout2, ThreadBlockSize, value_type>;
ASSERT_EQ(cudaFuncSetAttribute(coop_copy, cudaFuncAttributeMaxDynamicSharedMemorySize, static_cast<int>(shared_memory_bytes)), cudaSuccess);
auto d_in_ptr = thrust::raw_pointer_cast(d_in.data() + extra_elements);
auto d_out_ptr = thrust::raw_pointer_cast(d_out.data() + extra_elements);
coop_copy<<<1, ThreadBlockSize, shared_memory_bytes>>>(d_in_ptr, d_out_ptr);
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
cudaError_t error = cudaGetLastError();
FAIL() << "Error at kernel sync: " << cudaGetErrorString(error) << "\n";
}
// Validate
thrust::host_vector<value_type> h_result = d_out;
Tensor h_result_tensor = make_tensor((h_result.data() + extra_elements), gmem_layout_out {});
for (int i = 0; i < cute::size(h_in_tensor); ++i) {
ASSERT_EQ(h_result_tensor(i), h_out_tensor(i))
<< i << " - result:" << h_result_tensor(i) << " expected:" << h_out_tensor(i);
}
}
}
template<class T>
class SM70_CuTe_Volta;
template<class Mode, class MaxVecBits>
class SM70_CuTe_Volta<std::tuple<Mode, MaxVecBits>>: public testing::Test
{
public:
using mode = Mode;
static constexpr int max_vec_bits = MaxVecBits::value;
};
typedef testing::Types<
std::tuple<cooperative_copy_mode::global_shared, cute::Int<128>>,
std::tuple<cooperative_copy_mode::global_shared, cute::Int<64>>,
std::tuple<cooperative_copy_mode::global_shared, cute::Int<32>>,
std::tuple<cooperative_copy_mode::global_shared, cute::Int<16>>,
std::tuple<cooperative_copy_mode::global_global, cute::Int<128>>,
std::tuple<cooperative_copy_mode::global_global, cute::Int<64>>,
std::tuple<cooperative_copy_mode::global_global, cute::Int<32>>,
std::tuple<cooperative_copy_mode::global_global, cute::Int<16>>,
std::tuple<cooperative_copy_mode::shared_shared, cute::Int<128>>,
std::tuple<cooperative_copy_mode::shared_shared, cute::Int<64>>,
std::tuple<cooperative_copy_mode::shared_shared, cute::Int<32>>,
  std::tuple<cooperative_copy_mode::shared_shared, cute::Int<16>>
> CooperativeCopyModeMaxVecBitsList;
TYPED_TEST_SUITE(SM70_CuTe_Volta, CooperativeCopyModeMaxVecBitsList);
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefault1D)
{
using value_type = float;
constexpr uint32_t count = 512;
using gmem_layout_t = decltype(make_layout(make_shape(Int<count>{})));
using smem_layout_t = decltype(make_layout(make_shape(Int<count>{})));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefault1DFallback)
{
using value_type = float;
constexpr uint32_t count = 99;
using gmem_layout_t = decltype(make_layout(make_shape(Int<count>{})));
using smem_layout_t = decltype(make_layout(make_shape(Int<count>{})));
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSG2D)
{
using value_type = float;
constexpr uint32_t x = 32;
constexpr uint32_t y = 32;
using gmem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y>{})));
using smem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y>{})));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSG2DFallback)
{
using value_type = float;
constexpr uint32_t x = 37;
constexpr uint32_t y = 37;
using gmem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y>{})));
using smem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y>{})));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSG2DCustomStride)
{
using value_type = float;
constexpr uint32_t x = 16;
constexpr uint32_t y = 16;
using gmem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y>{}), make_stride(Int<y>{}, Int<1>{})));
using smem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y>{}), make_stride(Int<1>{}, Int<x>{})));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSG3D)
{
using value_type = cute::half_t;
constexpr uint32_t x = 8;
constexpr uint32_t y = 8;
constexpr uint32_t z = 16;
using gmem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y>{}, Int<z>{})));
using smem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y>{}, Int<z>{})));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSG3DFallback)
{
using value_type = cute::half_t;
constexpr uint32_t x = 44;
constexpr uint32_t y = 24;
constexpr uint32_t z = 14;
using gmem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y>{}, Int<z>{})));
using smem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y>{}, Int<z>{})));
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSG2Dto3D)
{
using value_type = double;
constexpr uint32_t x = 16;
constexpr uint32_t y = 16;
constexpr uint32_t z = 4;
using gmem_layout_t = decltype(make_layout(make_shape(Int<x>{}, Int<y*z>{})));
using smem_layout_t = decltype(make_layout(make_shape(Int<z>{}, Int<y>{}, Int<x>{})));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSGCustom1)
{
using value_type = double;
using gmem_layout_t = decltype(make_layout(
make_shape(Int<8>{}, make_shape(Int<2>{}, Int<2>{})),
make_stride(Int<2>{}, make_shape(Int<1>{}, Int<16>{}))
));
using smem_layout_t = decltype(make_layout(
make_shape(Int<8>{}, Int<4>{}),
make_stride(Int<4>{}, Int<1>{})
));
constexpr uint32_t thread_block_size = 8;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSGCustom2)
{
using value_type = float;
using gmem_layout_t = decltype(make_layout(
make_shape(make_shape(Int<4>{}, Int<2>{}), make_shape(Int<2>{}, Int<2>{})),
make_stride(make_shape(Int<4>{}, Int<1>{}), make_shape(Int<16>{}, Int<2>{}))
));
using smem_layout_t = decltype(make_layout(
make_shape(make_shape(Int<2>{}, Int<2>{}, Int<2>{}), make_shape(Int<2>{}, Int<2>{})),
make_stride(make_shape(Int<16>{}, Int<4>{}, Int<1>{}), make_shape(Int<8>{}, Int<2>{}))
));
constexpr uint32_t thread_block_size = 16;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSGSwizzle1)
{
using value_type = float;
using gmem_layout_t = Layout<Shape<_8, _64>, Stride<_64, _1>>;
using smem_layout_t = decltype(composition(Swizzle<3, 3, 3>{}, Layout<Shape<_8, _64>, Stride<_64, _1>>{}));
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSGSwizzle2)
{
using value_type = cute::half_t;
using gmem_layout_t = decltype(make_layout(make_shape(Int<64>{}, Int<64>{})));
using smem_atom_layout_t = decltype(composition(Swizzle<3, 2, 3> {}, Layout<Shape<_8, _32>, Stride<_32, _1>>{}));
using smem_layout_t = decltype(tile_to_shape(
smem_atom_layout_t{},
make_shape(shape<0>(gmem_layout_t{}), shape<1>(gmem_layout_t{})))
);
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSGSwizzle3)
{
using value_type = cute::half_t;
using gmem_layout_t = decltype(make_layout(make_shape(Int<64>{}, Int<64>{})));
using smem_atom_layout_t = decltype(composition(Swizzle<2, 4, 3> {}, Layout<Shape<_16, _64>, Stride<_64, _1>>{}));
using smem_layout_t = decltype(tile_to_shape(
smem_atom_layout_t{},
make_shape(shape<0>(gmem_layout_t{}), shape<1>(gmem_layout_t{})))
);
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
TYPED_TEST(SM70_CuTe_Volta, CooperativeCopyDefaultGSSGSwizzle4)
{
using value_type = cute::half_t;
using gmem_atom_layout_t = decltype(composition(Swizzle<3, 2, 3> {}, Layout<Shape<_8, _32>, Stride<_32, _1>>{}));
using smem_layout_t = decltype(make_layout(make_shape(Int<64>{}, Int<64>{})));
using gmem_layout_t = decltype(tile_to_shape(
gmem_atom_layout_t{},
make_shape(shape<0>(smem_layout_t{}), shape<1>(smem_layout_t{})))
);
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
gmem_layout_t,
smem_layout_t,
thread_block_size,
value_type>();
}
| test/unit/cute/volta/cooperative_copy.cu/0 | {
"file_path": "test/unit/cute/volta/cooperative_copy.cu",
"repo_id": "test",
"token_count": 9554
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/layout/layout.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, int N, typename Func>
__global__ void test_Epilogue_thread_activation(T *out, T *in) {
cutlass::Array<T, N> *vec_out = reinterpret_cast<cutlass::Array<T, N> *>(out);
cutlass::Array<T, N> *vec_in = reinterpret_cast<cutlass::Array<T, N> *>(in);
Func func;
vec_out[threadIdx.x] = func(vec_in[threadIdx.x]);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reference
//
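// Golden input/output pairs for GELU, where GELU(x) = x * Phi(x) and Phi is the standard normal CDF
// (e.g. GELU(1.587426) ~= 1.498200, matching the first entry below). The kernel under test uses the
// GELU_taylor approximation, so the checks compare against these values with a small relative-error
// tolerance.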
static double GELU_golden_input[] = {
1.587425827980, 1.157652974129, 0.750432848930, -0.965980410576,
-0.388184845448, 0.014422321692, 0.353164494038, 1.354383468628,
0.167588576674, 0.272798538208, -0.377032428980, 1.923444747925,
0.308164477348, -0.341318070889, 0.278338819742, -0.292668998241,
-1.051743745804, -0.814175724983, 0.112737402320, 1.262938618660,
-1.582363605499, 0.722016870975, 1.053453564644, -0.659764587879,
0.734917521477, 0.091274201870, 0.604461073875, -0.219043627381,
-0.136795744300, 0.960650205612, -1.805408835411, 0.091029644012,
-1.023343324661, 0.147713735700, -0.499895423651, 1.351878166199,
-1.631091356277, -0.336171895266, -1.612408638000, 0.090832948685,
-0.658132910728, -0.326727777719, -1.986387014389, 0.787685871124,
-1.015677452087, -0.225094825029, 0.876752018929, 0.744826257229,
0.870290279388, -0.757595360279, 1.510331749916, 0.750012576580,
0.906444966793, -0.915759027004, 1.260277032852, -0.158465340734,
-0.109191477299, -0.817102134228, 0.391305118799, -0.524910449982,
0.351349592209, 0.801979541779, 0.446691334248, -0.741077482700,
1.205966711044, -0.910210072994, 0.945986449718, 0.784096539021,
1.670521497726, 0.344931513071, -0.301411420107, 0.309870749712,
-0.879704594612, -1.951189517975, -0.805817663670, -0.661812782288,
-0.505914270878, -1.836273789406, -0.381845980883, -0.554707705975,
-0.375447630882, -0.516645610332, 0.509586095810, 1.087131023407,
2.664817094803, -1.558295488358, -0.076461032033, -0.504621028900,
1.327111959457, -1.819981694221, 1.350415468216, -2.074112653732,
1.501431345940, -1.339013576508, 0.162817999721, -1.473457217216,
0.357770472765, 0.188413277268, 1.601302266121, -0.653882205486,
0.856162548065, 0.763102591038, -0.526283502579, 0.581961452961,
0.089969776571, 1.968745589256, 0.545802056789, -1.168786048889,
1.206663012505, -0.109096683562, -1.223938226700, 0.744599223137,
-1.779406785965, 0.766436159611, -0.579044401646, -1.002057313919,
-0.715845823288, -0.562508940697, 0.886768460274, 2.327786445618,
-0.148763969541, -0.918884515762, -0.367678701878, -1.105021238327,
-0.461237311363, 0.158228352666, -0.254040330648, 1.427477598190,
0.277530491352, 0.046293262392, -0.535557329655, -1.486695051193,
-0.953706681728, -1.040495038033, -0.314667612314, 0.348172843456,
0.522773325443, 0.025960063562, -0.482472360134, 1.993084549904,
-0.253064930439, -0.012146313675, -2.166327714920, 0.398040622473,
-0.022238900885, -0.443580865860, -0.898376941681, -0.571689844131,
1.666979670525, -0.831176340580, -0.671057403088, 0.481970995665,
-1.096243023872, -1.493894338608, 0.596651911736, -0.229505166411,
1.165976166725, 0.905094027519, 0.049716457725, -1.362933635712,
-0.366948783398, 1.461613893509, -0.718411505222, 0.895385026932,
-0.763122260571, 1.329716682434, 1.366570711136, -0.086544901133,
0.059739742428, 0.940766513348, -0.272854357958, -1.738811373711,
-0.361239165068, 0.696977972984, 1.288442254066, 1.264815807343,
-0.573566436768, -1.141678214073, 0.081865988672, -0.886228799820,
-0.236933603883, 1.050115466118, -0.538952171803, 0.651773929596,
-0.220034509897, -1.198960781097, 1.247478365898, -0.053529661149,
0.639809548855, 1.672434806824, 0.511088073254, -1.179364681244,
-0.730427742004, 0.157630980015, 0.389369845390, -0.925578773022,
-0.093250080943, -0.391062080860, 0.852983593941, 1.868778109550,
-1.198786258698, 0.604997038841, -1.482687234879, -2.469333171844,
0.718807697296, -0.559609353542, 2.187228441238, -2.927527904510,
0.148535788059, -0.097280368209, 0.674131810665, -1.137645959854,
0.792729616165, -1.166317462921, -0.498791724443, 1.675866723061,
-0.137909621000, -0.653263568878, -2.281216144562, 0.296096831560,
2.002410173416, 1.083609819412, 0.933580815792, -1.504760265350,
2.185185909271, 0.286121010780, -1.035485863686, -0.216372340918,
-0.274334043264, -0.849510788918, -1.397169828415, -0.407644748688,
0.159476816654, -0.170650705695, 0.335193097591, -0.156852483749,
0.036168430001, 0.858105242252, -1.086121797562, 0.404813349247,
-0.481496721506, -0.389882832766, 0.020690204576, -0.772020936012,
-0.758921504021, 0.323482036591, 0.115715265274, -0.811228036880,
-0.882436633110, 0.176811277866, 1.678015947342, 0.379081040621,
-0.842976212502, 0.346952259541, -0.545828759670, 1.632800459862
};
static double GELU_golden_output[] = {
1.498199582100, 1.014679551125, 0.580462038517, -0.161344811320,
-0.135453075171, 0.007294139825, 0.225325092673, 1.235459089279,
0.094946734607, 0.165724009275, -0.133120641112, 1.871103763580,
0.191376730800, -0.125069886446, 0.169681981206, -0.112644664943,
-0.154036879539, -0.169163048267, 0.061428427696, 1.132469892502,
-0.089851818979, 0.552240371704, 0.899579226971, -0.168043658137,
0.565008401871, 0.048956073821, 0.439583092928, -0.090532489121,
-0.060955654830, 0.798911273479, -0.064101703465, 0.048816055059,
-0.156645998359, 0.082529976964, -0.154254898429, 1.232632875443,
-0.083896033466, -0.123835846782, -0.086161509156, 0.048703473061,
-0.167972877622, -0.121522113681, -0.046670529991, 0.617986679077,
-0.157319813967, -0.092503339052, 0.709896743298, 0.574865520000,
0.703132867813, -0.169963955879, 1.411436080933, 0.580042064190,
0.741154611111, -0.164741978049, 1.129479527473, -0.069256491959,
-0.049848672003, -0.169087052345, 0.255214750767, -0.157380074263,
0.223928079009, 0.632535398006, 0.300378054380, -0.169946283102,
1.068588852882, -0.165071934462, 0.783203184605, 0.614346146584,
1.591325283051, 0.219006344676, -0.115003645420, 0.192637458444,
-0.166712537408, -0.049788996577, -0.169361919165, -0.168130636215,
-0.155041679740, -0.060888241976, -0.134137839079, -0.160614117980,
-0.132782235742, -0.156389534473, 0.354075312614, 0.936574816704,
2.654553413391, -0.092845752835, -0.035900454968, -0.154874503613,
1.204704761505, -0.062572605908, 1.230982899666, -0.039479542524,
1.401402950287, -0.120890334249, 0.091938301921, -0.103604510427,
0.228880971670, 0.108285568655, 1.513783097267, -0.167782157660,
0.688394129276, 0.593158841133, -0.157540664077, 0.418839782476,
0.048209801316, 1.920528769493, 0.386099845171, -0.141709372401,
1.069367766380, -0.049809500575, -0.135230198503, 0.574639260769,
-0.066881760955, 0.596510827541, -0.162873372436, -0.158483341336,
-0.169686436653, -0.161375194788, 0.720409095287, 2.304597616196,
-0.065585561097, -0.164551988244, -0.131098195910, -0.148708447814,
-0.148663327098, 0.089060656726, -0.101548098028, 1.317959904671,
0.169103100896, 0.024001283571, -0.158595800400, -0.101909510791,
-0.162240833044, -0.155090972781, -0.118474565446, 0.221488356590,
0.365645468235, 0.013248858973, -0.151851043105, 1.946992278099,
-0.101253561676, -0.006014300976, -0.032804865390, 0.260597169399,
-0.010922161862, -0.145792976022, -0.165743649006, -0.162226170301,
1.587365984917, -0.168676435947, -0.168497130275, 0.330191940069,
-0.149622067809, -0.100989677012, 0.432351946831, -0.093922272325,
1.023946166039, 0.739726305008, 0.025843897834, -0.117827951908,
-0.130937814713, 1.356489539146, -0.169726014137, 0.729478538036,
-0.169943705201, 1.207641005516, 1.249209761620, -0.040288090706,
0.031292784959, 0.777626037598, -0.107090584934, -0.071350336075,
-0.129670530558, 0.527676224709, 1.161149263382, 1.134579420090,
-0.162394225597, -0.144757837057, 0.043603736907, -0.166386902332,
-0.096278958023, 0.895924389362, -0.158969298005, 0.484089732170,
-0.090857118368, -0.138206124306, 1.115107178688, -0.025622237474,
0.472724437714, 1.593463659286, 0.355387806892, -0.140493586659,
-0.169871479273, 0.088687323034, 0.253673940897, -0.164135158062,
-0.043161027133, -0.136040985584, 0.685087263584, 1.811169505119,
-0.138226687908, 0.440080583096, -0.102422207594, -0.016713079065,
0.549075841904, -0.161096408963, 2.155813455582, -0.005001218989,
0.083037458360, -0.044870752841, 0.505522191525, -0.145202502608,
0.623111069202, -0.141991063952, -0.154108211398, 1.597298502922,
-0.061391282827, -0.167753636837, -0.025704355910, 0.182520583272,
1.957115054131, 0.932696640491, 0.769961357117, -0.099604383111,
2.153636932373, 0.175279796124, -0.155551761389, -0.089653611183,
-0.107515335083, -0.168032020330, -0.113423995674, -0.139319628477,
0.089841812849, -0.073763631284, 0.211594089866, -0.068651281297,
0.018605981022, 0.690416753292, -0.150658726692, 0.266040354967,
-0.151710823178, -0.135800719261, 0.010515870526, -0.169883996248,
-0.169960290194, 0.202769815922, 0.063187584281, -0.169236257672,
-0.166577890515, 0.100812792778, 1.599699616432, 0.245525524020,
-0.168275654316, 0.220552831888, -0.159705042839, 1.549110531807
};
/////////////////////////////////////////////////////////////////////////////////////////////////
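// For context on the golden data above: GELU_taylor is a tanh-based approximation of GELU,
// commonly written as gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
// The host helper below is an illustrative sketch only (it is not the CUTLASS device functor,
// and the constants are the widely used ones, stated here as an assumption); it shows how a
// golden output value can be reproduced from a golden input value. Assumes <cmath> is available
// in this translation unit.
inline float gelu_tanh_reference_sketch(float x) {
  float const kSqrt2OverPi = 0.7978845608028654f;  // sqrt(2 / pi)
  float const kCubicCoeff = 0.044715f;
  return 0.5f * x * (1.0f + std::tanh(kSqrt2OverPi * (x + kCubicCoeff * x * x * x)));
}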
TEST(Epilogue_thread_gelu_taylor, device_f32) {
int const kN = 256;
int const kV = 4;
using Element = float;
using Func = cutlass::epilogue::thread::GELU_taylor<cutlass::Array<Element, kV>>;
double tolerance = 0.005;
//
// Construct workspace
//
cutlass::HostTensor<Element, cutlass::layout::RowMajor> tensor_Destination({1, kN});
cutlass::HostTensor<Element, cutlass::layout::RowMajor> tensor_Source({1, kN});
for (int i = 0; i < kN; ++i) {
tensor_Source.host_data(i) = Element(GELU_golden_input[i]);
}
tensor_Destination.sync_device();
tensor_Source.sync_device();
//
// Launch the kernel
//
dim3 grid(1,1,1);
dim3 block(kN / kV, 1, 1);
test_Epilogue_thread_activation<Element, kV, Func><<< grid, block >>>(
tensor_Destination.device_data(),
tensor_Source.device_data());
tensor_Destination.sync_host();
//
// Verify
//
for (int i = 0; i < kN; ++i) {
Element input = Element(GELU_golden_input[i]);
Element got = tensor_Destination.host_data(i);
Element expected = Element(GELU_golden_output[i]);
double rel_error = (double(got) - double(expected)) / double(expected);
double tolerance_override = tolerance;
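// A handful of indices use a relaxed tolerance: where the expected GELU output is very close
// to zero, the relative error computed above is ill-conditioned, so a tiny absolute difference
// between the approximation and the golden value shows up as a large relative error
// (interpretation; the original test does not document the reason for these overrides).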
switch (i) {
case 142: tolerance_override = 0.008; break;
case 203: tolerance_override = 0.03; break;
case 207: tolerance_override = 0.09; break;
case 218: tolerance_override = 0.013; break;
}
EXPECT_LT(std::abs(rel_error), tolerance_override)
<< "Input[" << i << "]: " << input << ", Got: " << got << ", expected: " << expected;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_gelu_taylor, device_f16) {
int const kN = 256;
int const kV = 8;
using Element = cutlass::half_t;
using Func = cutlass::epilogue::thread::GELU_taylor<cutlass::Array<Element, kV>>;
double tolerance = 0.005;
//
// Construct workspace
//
cutlass::HostTensor<Element, cutlass::layout::RowMajor> tensor_Destination({1, kN});
cutlass::HostTensor<Element, cutlass::layout::RowMajor> tensor_Source({1, kN});
for (int i = 0; i < kN; ++i) {
tensor_Source.host_data(i) = Element(GELU_golden_input[i]);
}
tensor_Destination.sync_device();
tensor_Source.sync_device();
//
// Launch the kernel
//
dim3 grid(1,1,1);
dim3 block(kN / kV, 1, 1);
test_Epilogue_thread_activation<Element, kV, Func><<< grid, block >>>(
tensor_Destination.device_data(),
tensor_Source.device_data());
tensor_Destination.sync_host();
//
// Verify
//
for (int i = 0; i < kN; ++i) {
Element input = Element(GELU_golden_input[i]);
Element got = tensor_Destination.host_data(i);
Element expected = Element(GELU_golden_output[i]);
double rel_error = (double(got) - double(expected)) / double(expected);
double tolerance_override = tolerance;
switch (i) {
case 36: tolerance_override = 0.006; break;
case 77: tolerance_override = 0.009; break;
case 95: tolerance_override = 0.008; break;
case 112: tolerance_override = 0.007; break;
case 171: tolerance_override = 0.006; break;
case 203: tolerance_override = 0.03; break;
case 207: tolerance_override = 0.15; break;
}
EXPECT_LT(std::abs(rel_error), tolerance_override)
<< "Input[" << i << "]: " << input << ", Got: " << got << ", expected: " << expected;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/thread/activation.cu/0 | {
"file_path": "test/unit/epilogue/thread/activation.cu",
"repo_id": "test",
"token_count": 7577
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for epilogues
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/complex.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/util/host_tensor_planar_complex.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace kernel {
template <typename Epilogue>
__global__ void epilogue_planar_complex_threadblock(
typename Epilogue::OutputTileIterator::Params params_D,
typename Epilogue::OutputTileIterator::Element *ptr_D,
int64_t imaginary_stride_D,
typename Epilogue::OutputTileIterator::Params params_C,
typename Epilogue::OutputTileIterator::Element *ptr_C,
int64_t imaginary_stride_C,
typename Epilogue::OutputOp::Params params_output_op,
cutlass::MatrixCoord problem_size,
cutlass::TensorRef<
typename Epilogue::WarpMmaOperator::ElementC,
typename Epilogue::WarpMmaOperator::LayoutC> accumulator_ref,
int64_t imaginary_stride_accum,
int epilogue_count = 1) {
__shared__ typename Epilogue::SharedStorage shared_storage;
int thread_idx = threadIdx.x;
int warp_idx = threadIdx.x / 32;
int lane_idx = threadIdx.x % 32;
//
// Construct the epilogue
//
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D_real(
params_D,
ptr_D,
problem_size,
thread_idx
);
typename Epilogue::OutputTileIterator iterator_D_imag(
params_D,
ptr_D + imaginary_stride_D,
problem_size,
thread_idx
);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_C_real(
params_C,
ptr_C,
problem_size,
thread_idx
);
typename Epilogue::OutputTileIterator iterator_C_imag(
params_C,
ptr_C + imaginary_stride_C,
problem_size,
thread_idx
);
// Epilogue operator
Epilogue epilogue(
shared_storage,
thread_idx,
warp_idx,
lane_idx);
//
// Initialize the accumulators
//
int warp_mn = warp_idx % (Epilogue::WarpCount::kM * Epilogue::WarpCount::kN);
int warp_m = warp_mn % Epilogue::WarpCount::kM;
int warp_n = warp_mn / Epilogue::WarpCount::kM;
accumulator_ref.add_coord_offset({
warp_m * Epilogue::WarpMmaOperator::Shape::kM,
warp_n * Epilogue::WarpMmaOperator::Shape::kN});
//
// Load accumulators
//
typename Epilogue::WarpMmaOperator::IteratorC accumulator_iterator(accumulator_ref, lane_idx);
typename Epilogue::AccumulatorTile accumulators;
accumulators.clear();
accumulator_iterator.load(accumulators.real);
accumulator_iterator.load_with_pointer_offset(accumulators.imag, imaginary_stride_accum);
//
// Perform the epilogue operation
//
typename Epilogue::OutputOp output_op(params_output_op);
// Place the epilogue in a loop so assembly is clearly visible
for (int iter = 0; iter < epilogue_count; ++iter) {
epilogue(
output_op,
iterator_D_real,
iterator_D_imag,
accumulators,
iterator_C_real,
iterator_C_imag);
}
}
} // namespace kernel
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
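// Layout note for the testbed below: a planar-complex tensor stores the full real plane first
// and the imaginary plane at a fixed element offset (imaginary_stride) from it, which is why
// the kernel above reuses the same iterator parameters and simply offsets ptr_D / ptr_C / the
// accumulators by their imaginary strides. The helper below is an illustrative sketch (not part
// of the testbed) assuming a row-major plane with leading dimension ld.
template <typename Element>
Element planar_complex_imag_at_sketch(
  Element const *ptr, int64_t imaginary_stride, int64_t ld, int row, int col) {
  return ptr[imaginary_stride + int64_t(row) * ld + col];
}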
template <
typename Epilogue_
>
class EpiloguePlanarComplexTestbed {
public:
using Epilogue = Epilogue_;
using ElementAccumulator = typename Epilogue::ElementAccumulator;
using ElementCompute = typename Epilogue::OutputOp::ElementCompute;
using ElementOutput = typename Epilogue::ElementOutput;
using OutputOpParams = typename Epilogue::OutputOp::Params;
using ComplexElementOutput = cutlass::complex<ElementOutput>;
using ComplexElementAccumulator = cutlass::complex<ElementAccumulator>;
using ComplexElementCompute = cutlass::complex<ElementCompute>;
public:
//
// Data members
//
cutlass::MatrixCoord quantized_size;
cutlass::HostTensorPlanarComplex<ElementAccumulator, cutlass::layout::RowMajor> accumulator_tensor;
cutlass::HostTensorPlanarComplex<ElementOutput, cutlass::layout::RowMajor> source_tensor;
cutlass::HostTensorPlanarComplex<ElementOutput, cutlass::layout::RowMajor> output_tensor;
public:
//
// Methods
//
EpiloguePlanarComplexTestbed():
quantized_size(Epilogue::Shape::kM, Epilogue::Shape::kN),
accumulator_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
source_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
output_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}) {
//
// Initialize problem space
//
#if 1
uint64_t seed = 2019;
cutlass::reference::host::TensorFillRandomUniform(
accumulator_tensor.host_view(),
seed,
20,
-20,
0);
cutlass::reference::host::TensorFillRandomUniform(
source_tensor.host_view(),
seed + 2018,
20,
-20,
0);
#else
cutlass::reference::host::BlockFillSequential(accumulator_tensor.host_data(), accumulator_tensor.capacity());
#endif
}
bool run_all() {
cutlass::complex<float> alpha_values[3];
alpha_values[0] = cutlass::complex<float>(1, 0);
alpha_values[1] = cutlass::complex<float>(0, 0);
alpha_values[2] = cutlass::complex<float>(2.25f, -0.5f);
cutlass::complex<float> beta_values[3];
beta_values[0] = cutlass::complex<float>(0, 0);
beta_values[1] = cutlass::complex<float>(1, 0);
beta_values[2] = cutlass::complex<float>(0.5f, -2.25f);
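// Three values each for alpha and beta: the multiplicative identity, zero, and a general complex
// value, so both the scaling special cases and the fully general path get exercised
// (interpretation of the choices above).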
// Testing every case exhaustively would make the test runtime explode, so this covers the full
// output tile plus several smaller problem sizes to stress predication.
for (int m_idx = 0; m_idx < 3; ++m_idx) {
for (int n_idx = 0; n_idx < 3; ++n_idx) {
cutlass::MatrixCoord problem_size(
quantized_size.row() - m_idx * 3,
quantized_size.column() - n_idx * Epilogue::kElementsPerAccess
);
for (auto const &alpha : alpha_values) {
for (auto const &beta : beta_values) {
bool passed = run(problem_size, {alpha, beta});
if (!passed) {
return false;
}
}
}
}
}
return true;
}
/// Runs the test
bool run(
cutlass::MatrixCoord problem_size,
OutputOpParams output_params) {
//
// Initialize problem space
//
ComplexElementOutput default_output = ComplexElementOutput(ElementOutput(-127), ElementOutput(-101));
cutlass::reference::host::TensorFill(output_tensor.host_view(), default_output);
accumulator_tensor.sync_device();
output_tensor.sync_device();
source_tensor.sync_device();
//
// Initialize epilogue parameters
//
typename Epilogue::OutputTileIterator::Params params_D(output_tensor.layout());
typename Epilogue::OutputTileIterator::Params params_C(source_tensor.layout());
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(Epilogue::WarpCount::kCount * 32, 1);
test::kernel::epilogue_planar_complex_threadblock<Epilogue><<< grid, block >>>(
params_D,
output_tensor.device_data(),
output_tensor.imaginary_stride(),
params_C,
source_tensor.device_data(),
source_tensor.imaginary_stride(),
output_params,
problem_size,
accumulator_tensor.device_view_real(),
accumulator_tensor.imaginary_stride()
);
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Kernel error: " << cudaGetErrorString(result) << std::endl;
return false;
}
//
// Verify results
//
output_tensor.sync_host();
int errors = 0;
int const kMaxErrors = 5;
for (int r = 0; errors < kMaxErrors && r < quantized_size.row(); ++r) {
for (int c = 0; errors < kMaxErrors && c < quantized_size.column(); ++c) {
cutlass::MatrixCoord coord{r, c};
ComplexElementOutput got = output_tensor.at(coord);
ComplexElementOutput expected = default_output;
if (coord.row() < problem_size.row() && coord.column() < problem_size.column()) {
ComplexElementOutput src = source_tensor.at(coord);
ComplexElementCompute tmp =
output_params.alpha * ComplexElementCompute(accumulator_tensor.at(coord)) +
output_params.beta * ComplexElementCompute(src.real(), src.imag());
expected = ComplexElementOutput(ElementOutput(tmp.real()), ElementOutput(tmp.imag()));
}
if (expected != got) {
using OutputIO = cutlass::ScalarIO<ComplexElementOutput>;
EXPECT_TRUE(false)
<< "-------\n"
<< "Error - output element (" << coord << ") - expected: "
<< OutputIO(expected)
<< ", got: " << OutputIO(got) << std::endl;
++errors;
}
}
}
//
// Report results on error
//
if (errors) {
std::cout << "Incorrect result for problem("
<< problem_size.row() << ", "
<< problem_size.column() << ") for alpha: " << output_params.alpha << ", beta: " << output_params.beta << std::endl;
std::stringstream ss;
ss
<< "output_tensor_op_" << Epilogue::Shape::kM << "x" << Epilogue::Shape::kN << "_"
<< Epilogue::WarpTileIterator::WarpShape::kM << "x"
<< Epilogue::WarpTileIterator::WarpShape::kN
<< "_slice_" << Epilogue::WarpCount::kK << ".csv";
std::ofstream output_file(ss.str());
output_file << output_tensor.host_view();
std::cout << "Wrote workspace to '" << ss.str() << "'" << std::endl;
}
return !errors;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/threadblock/testbed_planar_complex.h/0 | {
"file_path": "test/unit/epilogue/threadblock/testbed_planar_complex.h",
"repo_id": "test",
"token_count": 4326
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for GEMM + broadcast interface
*/
#include <fstream>
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/kernel/default_gemm_with_broadcast.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/gemm/device/gemm_universal_with_broadcast.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/linear_combination_bias_relu.h"
#include "cutlass/epilogue/thread/linear_combination_residual_block.h"
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_elementwise.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
template<typename GemmElement, typename LayoutA, typename LayoutB, typename LayoutC>
struct TestbedUtils {
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<GemmElement, LayoutA> tensor_A; // Input A
cutlass::HostTensor<GemmElement, LayoutB> tensor_B; // Input B
cutlass::HostTensor<GemmElement, LayoutC> tensor_C; // Input C
cutlass::HostTensor<GemmElement, LayoutC> tensor_D1; // Input D
cutlass::HostTensor<GemmElement, LayoutC> tensor_D2; // Input D
cutlass::HostTensor<GemmElement, LayoutC> tensor_Y1; // Input Y
cutlass::HostTensor<GemmElement, LayoutC> tensor_Y2; // Input Y
cutlass::HostTensor<GemmElement, LayoutC> tensor_Y_ref;
//
// Methods
//
TestbedUtils(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<Element>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::AllZeros) {
cutlass::reference::host::TensorFill(view);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(problem_size.mk());
tensor_B.resize(problem_size.kn());
tensor_C.resize({1, problem_size.n()});
tensor_D1.resize(problem_size.mn());
tensor_D2.resize(problem_size.mn());
tensor_Y1.resize(problem_size.mn());
tensor_Y2.resize(problem_size.mn());
tensor_Y_ref.resize(problem_size.mn());
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
// Initialize D data to a smaller range. This helps avoid large roundoff errors.
int d_scope_min = -2;
int d_scope_max = 2;
cutlass::reference::host::TensorFillRandomUniform(tensor_D1.host_view(), seed + 2016, d_scope_max, d_scope_min, 0);
cutlass::reference::host::TensorFillRandomUniform(tensor_D2.host_view(), seed + 2015, d_scope_max, d_scope_min, 0);
EXPECT_TRUE(initialize_tensor(tensor_Y1.host_view(), cutlass::Distribution::AllZeros, 0));
EXPECT_TRUE(initialize_tensor(tensor_Y2.host_view(), cutlass::Distribution::AllZeros, 0));
EXPECT_TRUE(initialize_tensor(tensor_Y_ref.host_view(), cutlass::Distribution::AllZeros, 0));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = GemmElement(1);
tensor_B.host_view().at({0, 0}) = GemmElement(1);
tensor_C.host_view().at({0, 0}) = GemmElement(1);
tensor_D1.host_view().at({0, 0}) = GemmElement(1);
tensor_D2.host_view().at({0, 0}) = GemmElement(1);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D1.sync_device();
tensor_D2.sync_device();
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::gemm::GemmCoord problem_size, cutlass::HostTensor<GemmElement, LayoutC>& tensor_Y_ref, cutlass::HostTensor<GemmElement, LayoutC>& tensor_Y) {
tensor_Y_ref.sync_host();
tensor_Y.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D1.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D2.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Y_ref.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Y.host_view()), 0);
bool passed = true;
float norm_diff = 0;
norm_diff = cutlass::reference::host::TensorNormDiff(tensor_Y_ref.host_view(), tensor_Y.host_view(), float());
passed = (norm_diff <= 0.1f);
EXPECT_LT(norm_diff, 0.1f) << " tensor_Y is incorrect";
if (!passed) {
std::ofstream file("errors_testbed_gemm_broadcast_new.txt");
file
<< "problem: " << problem_size << "\n\n";
file
<< "capacity: \n"
<< "A: " << tensor_A.capacity()
<< "\nB: " << tensor_B.capacity()
<< "\nC: " << tensor_C.capacity()
<< "\nD1: " << tensor_D1.capacity()
<< "\nD2: " << tensor_D2.capacity()
<< "\nY: " << tensor_Y.capacity()
<< "\n\n"
<< "\nY_ref: " << tensor_Y_ref.capacity()
<< "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\n\nB =\n" << tensor_B.host_view()
<< "\n\nC =\n" << tensor_C.host_view()
<< "\n\nD1 =\n" << tensor_D1.host_view()
<< "\n\nD2 =\n" << tensor_D2.host_view()
<< "\n\nY =\n" << tensor_Y.host_view()
<< "\n\nY_ref =\n" << tensor_Y_ref.host_view();
}
return passed;
}
};
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
TEST(SM80_Device_GemmWithBroadcast_f16t_f16n_f16t_tensor_op_f16, 128x128_32x3_64x64x32_16x8x16) {
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using OpClass = cutlass::arch::OpClassTensorOp;
using ArchTag = cutlass::arch::Sm80;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle;
const int kStages = 3;
const int batch_count = 1;
const cutlass::half_t alpha(1);
const cutlass::half_t beta(1);
const int M = 1024;
const int K = 10240;
const int N = 512;
cutlass::gemm::GemmCoord problem{M, N, K};
const int batch_stride_A = 0;
const int batch_stride_B = 0;
const int batch_stride_C1 = 0;
const int batch_stride_C2 = 0;
const int batch_stride_D = 0;
const int batch_stride_Vector = 0;
const int batch_stride_Tensor = 0;
const int64_t lda = LayoutA::packed({problem.m(), problem.k()}).stride(0);
const int64_t ldb = LayoutB::packed({problem.k(), problem.n()}).stride(0);
const int64_t ldc1 = LayoutC::packed({problem.m(), problem.n()}).stride(0);
const int64_t ldc2 = LayoutC::packed({problem.m(), problem.n()}).stride(0);
const int64_t ldd = LayoutC::packed({problem.m(), problem.n()}).stride(0);
const int64_t ldv = 0;
const int64_t ldt = 0;
TestbedUtils<ElementA, LayoutA, LayoutB, LayoutC> utils;
utils.initialize(problem);
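// Verification strategy used in the rest of this test, restated here as a sketch:
//   Y_ref = alpha * (A @ B) + beta * broadcast(C)      // C is a 1 x N row vector
//   single source:  Y1 is expected to equal Y_ref .* D1
//   double source:  Y2 is expected to equal Y_ref .* D1 + D2
// where ".*" and "+" are elementwise; this mirrors the TensorMul / TensorAdd reference updates below.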
//
// Create reference Gemm
//
using GemmRef = cutlass::gemm::device::GemmUniversal<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator,
OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator, ElementAccumulator>,
ThreadblockSwizzle, kStages>;
typename GemmRef::Arguments args_ref{
cutlass::gemm::GemmUniversalMode::kGemm,
problem,
batch_count,
{alpha, beta},
utils.tensor_A.device_data(),
utils.tensor_B.device_data(),
utils.tensor_C.device_data(),
utils.tensor_Y_ref.device_data(),
batch_stride_A,
batch_stride_B,
batch_stride_C1,
batch_stride_D,
lda,
ldb,
ldv,
ldd,
};
GemmRef gemm_op_ref;
size_t workspace_size_ref = GemmRef::get_workspace_size(args_ref);
cutlass::device_memory::allocation<uint8_t> workspace_ref(workspace_size_ref);
cutlass::Status status = gemm_op_ref.initialize(args_ref, workspace_ref.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
status = gemm_op_ref();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
//
// Create GemmWithBroadcast from single source
//
using GemmSingle = cutlass::gemm::device::GemmUniversalWithBroadcast<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator,
OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape,
cutlass::epilogue::thread::LinearCombinationResidualBlock<
ElementOutput, ElementAccumulator, ElementAccumulator,
ElementAccumulator, 128 / cutlass::sizeof_bits<ElementOutput>::value,
cutlass::epilogue::thread::Identity, cutlass::multiplies, cutlass::epilogue::thread::Identity>,
ThreadblockSwizzle, kStages>;
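// As exercised here, this epilogue is expected to compute roughly
//   Y1 = UnaryOp( BinaryOp( ActivationOp(alpha * AB + beta * Vector), Residual ) )
// with ActivationOp = Identity, BinaryOp = multiplies, UnaryOp = Identity, i.e. an elementwise
// multiply of the linear combination with the residual tensor D1 (interpretation inferred from
// the host-side TensorMul verification below, not from the epilogue's documentation).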
typename GemmSingle::Arguments args_single{
cutlass::gemm::GemmUniversalMode::kGemm,
problem,
batch_count,
{alpha, beta},
utils.tensor_A.device_data(),
utils.tensor_B.device_data(),
utils.tensor_D1.device_data(),
utils.tensor_Y1.device_data(),
utils.tensor_C.device_data(),
/* ptr_Tensor = */ nullptr,
batch_stride_A,
batch_stride_B,
batch_stride_C1,
batch_stride_D,
batch_stride_Vector,
batch_stride_Tensor,
lda,
ldb,
ldc1,
ldd,
ldv,
ldt
};
GemmSingle gemm_op_single;
size_t workspace_size_single = GemmSingle::get_workspace_size(args_single);
cutlass::device_memory::allocation<uint8_t> workspace_single(workspace_size_single);
status = gemm_op_single.initialize(args_single, workspace_single.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
status = gemm_op_single();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
// Compute the broadcast on the reference previously computed and compare results
utils.tensor_Y_ref.sync_host();
cutlass::reference::host::TensorMul(utils.tensor_Y_ref.host_view(), utils.tensor_D1.host_view());
utils.tensor_Y_ref.sync_device();
utils.compare_reference(problem, utils.tensor_Y_ref, utils.tensor_Y1);
//
// Create GemmWithBroadcast from two sources
//
using GemmDouble = cutlass::gemm::device::GemmUniversalWithBroadcast<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator,
OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape,
cutlass::epilogue::thread::LinearCombinationResidualBlock<
ElementOutput, ElementAccumulator, ElementAccumulator,
ElementAccumulator, 128 / cutlass::sizeof_bits<ElementOutput>::value,
cutlass::epilogue::thread::Identity, cutlass::multiplies, cutlass::epilogue::thread::Identity, cutlass::plus>,
ThreadblockSwizzle, kStages>;
typename GemmDouble::Arguments args_double{
cutlass::gemm::GemmUniversalMode::kGemm,
problem,
batch_count,
{alpha, beta},
utils.tensor_A.device_data(),
utils.tensor_B.device_data(),
utils.tensor_D1.device_data(),
utils.tensor_D2.device_data(),
utils.tensor_Y2.device_data(),
utils.tensor_C.device_data(),
/* ptr_Tensor = */ nullptr,
batch_stride_A,
batch_stride_B,
batch_stride_C1,
batch_stride_C2,
batch_stride_D,
batch_stride_Vector,
batch_stride_Tensor,
lda,
ldb,
ldc1,
ldc2,
ldd,
ldv,
ldt
};
GemmDouble gemm_op_double;
size_t workspace_size_double = GemmDouble::get_workspace_size(args_double);
cutlass::device_memory::allocation<uint8_t> workspace_double(workspace_size_double);
status = gemm_op_double.initialize(args_double, workspace_double.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
status = gemm_op_double();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
// Compute the broadcast on the reference previously computed and compare results
utils.tensor_Y_ref.sync_host();
cutlass::reference::host::TensorAdd(utils.tensor_Y_ref.host_view(), utils.tensor_D2.host_view());
utils.tensor_Y_ref.sync_device();
utils.compare_reference(problem, utils.tensor_Y_ref, utils.tensor_Y2);
}
#endif
| test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_broadcast_sm80.cu/0 | {
"file_path": "test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_broadcast_sm80.cu",
"repo_id": "test",
"token_count": 6890
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <sstream>
#include <stdexcept>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "testbed.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct TestbedComplex : public Testbed<Gemm> {
using Base = Testbed<Gemm>;
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
//
// Methods
//
TestbedComplex(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
Base(init_A_, init_B_, init_C_, seed_) { }
/// Verifies the result is a GEMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
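// Host reference (sketch of the check): D_ref = alpha * op_A(A) * op_B(B) + beta * C, where
// op_A / op_B apply the optional conjugation selected by Gemm::kTransformA / kTransformB.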
cutlass::reference::host::GemmComplex(
problem_size,
alpha,
this->tensor_A.host_ref(),
Gemm::kTransformA,
this->tensor_B.host_ref(),
Gemm::kTransformB,
beta,
this->tensor_C.host_ref(),
this->reference_D.host_ref(),
ElementAccumulator(0)
);
return this->compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size,
int split_k_slices = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
//
// Initialize workspace
//
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
problem_size,
this->tensor_A.device_ref(),
this->tensor_B.device_ref(),
this->tensor_C.device_ref(),
this->tensor_D.device_ref(),
{alpha, beta},
split_k_slices
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestAllGemmComplex() {
bool passed = true;
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
int const kMinimumOperandElementSize =
std::min(
int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));
int const kAlignment =
cutlass::platform::is_same<
typename Gemm::OperatorClass,
cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;
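// Example: for half_t operands (16-bit), kAlignment = 128 / 16 = 8 elements, i.e. one 128-bit
// vectorized access; SIMT kernels fall back to an alignment of 1 element.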
int problem_size_m[] = {
kAlignment, 512 - 3*kAlignment
};
int problem_size_n[] = {
kAlignment, 512 - 2*kAlignment
};
int problem_size_k[] = {
kAlignment, 128 - kAlignment
};
int split_k_slices[] = {
1, 2, 3
};
double problem_alpha[] = {
1
};
double problem_beta[] = {
2.0
};
TestbedComplex<Gemm> testbed;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int split_k : split_k_slices) {
if (!Gemm::kSplitKSerial && split_k > 1) {
continue;
}
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
cutlass::gemm::GemmCoord problem_size(m, n, k);
passed = testbed.run(
problem_size,
split_k,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_complex.h/0 | {
"file_path": "test/unit/gemm/device/testbed_complex.h",
"repo_id": "test",
"token_count": 2998
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, bool Relu = false>
struct TestbedUniversal {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A;
cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_C;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_D;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> reference_D;
//
// Methods
//
TestbedUniversal(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
bool is_unsigned_int = std::numeric_limits<Element>::is_integer && !std::numeric_limits<Element>::is_signed;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = is_unsigned_int ? 2 : 1;
scope_min = is_unsigned_int ? 0 : -1;
} else if (bits_output == 16) {
scope_max = is_unsigned_int ? 10 : 5;
scope_min = is_unsigned_int ? 0 : -5;
} else {
scope_max = 8;
scope_min = -8;
}
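// The ranges above are deliberately narrow for low-precision types: the verification in
// compare_reference() uses exact TensorEquals, so inputs are kept small enough that the device
// result and the host reference accumulate identically (interpretation of the chosen bounds).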
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(problem_size.mk());
tensor_B.resize(problem_size.kn());
tensor_C.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
cutlass::Coord<2> origin(0);
tensor_A.host_view().at(origin) = typename Gemm::ElementA(1);
tensor_B.host_view().at(origin) = typename Gemm::ElementB(1);
tensor_C.host_view().at(origin) = typename Gemm::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
EXPECT_TRUE(passed) << " mismatched reference";
if (!passed) {
/*
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
*/
std::ofstream file("testbed_universal_errors.txt");
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
/// Verifies the result is a GEMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
cutlass::reference::host::GemmComplex<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC,
ElementCompute, ElementAccumulator
>(
problem_size,
alpha,
tensor_A.host_ref(),
Gemm::kTransformA,
tensor_B.host_ref(),
Gemm::kTransformB,
beta,
tensor_C.host_ref(),
reference_D.host_ref(),
ElementAccumulator(0)
);
if (Relu) {
for (int i = 0; i < problem_size.m(); ++i) {
for (int j = 0; j < problem_size.n(); ++j) {
reference_D.at(cutlass::MatrixCoord(i, j)) =
((ElementCompute)reference_D.at(cutlass::MatrixCoord(i, j)) < (ElementCompute)0)
? (typename Gemm::ElementC)0
: reference_D.at(cutlass::MatrixCoord(i, j));
}
}
}
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord problem_size,
int batch_count = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0))
{
/*
std::cout << "\n-----------------------\n";
std::cout << "mode: " << (int) mode << "\n";
std::cout << "problem size: " << problem_size << "\n";
std::cout << "batch_count: " << batch_count << "\n";
std::cout << "alpha: " << alpha << "\n";
std::cout << "beta: " << beta << "\n";
std::cout << "-----------------------\n\n";
*/
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha, beta},
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_D.device_data(),
problem_size.m() * problem_size.k(),
problem_size.n() * problem_size.k(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_C.layout().stride(0),
tensor_D.layout().stride(0)
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Failed with batch_count/split_k_slices = " << batch_count << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, bool Relu = false>
bool TestGemmUniversal(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmUniversalMode mode,
int batch_count,
double alpha = 1.0,
double beta = 2.0) {
bool passed = true;
TestbedUniversal<Gemm, Relu> testbed;
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
return passed;
}
template <typename Gemm, bool Relu = false>
bool TestAllGemmUniversal() {
bool passed = true;
int const kMinimumOperandElementSize =
std::min(
int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));
int const kAlignment = cutlass::platform::is_same<
typename Gemm::OperatorClass,
cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;
// int8_t gemm alignment constraints
int const kAlignmentM = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment;
int const kAlignmentN = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementB, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::LayoutB, cutlass::layout::RowMajor>::value ? 4 : kAlignment;
int const kAlignmentK = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::ElementB, int8_t>::value &&
(cutlass::platform::is_same<typename Gemm::LayoutA, cutlass::layout::RowMajor>::value ||
cutlass::platform::is_same<typename Gemm::LayoutB, cutlass::layout::ColumnMajor>::value) ? 4 : kAlignment;
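// Example: for a SIMT int8 GEMM these constraints round the affected alignments up to 4, e.g. a
// column-major int8 A forces kAlignmentM = 4, so that the packed 4 x int8 (32-bit) accesses the
// SIMT kernels rely on stay aligned (interpretation of the constraints encoded above).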
cutlass::gemm::GemmUniversalMode modes[] = {
cutlass::gemm::GemmUniversalMode::kGemm,
};
int problem_size_m[] = {
kAlignmentM, 512 - 3*kAlignmentM
};
int problem_size_n[] = {
kAlignmentN, 512 - 2*kAlignmentN
};
int problem_size_k[] = {
kAlignmentK,
Gemm::ThreadblockShape::kK * Gemm::kStages - kAlignmentK,
Gemm::ThreadblockShape::kK * Gemm::kStages * 3 - kAlignmentK
};
int batch_counts[] = { // may be interpreted as batch count or split-K slices
1, 2, 3, 5, 7
};
double problem_alpha[] = {
1
};
double problem_beta[] = {
2.0
};
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
for (cutlass::gemm::GemmUniversalMode mode : modes) {
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int batch_count : batch_counts) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
if (mode == cutlass::gemm::GemmUniversalMode::kGemm ||
mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) {
// skip very small K problems
if (k / batch_count < 2 * Gemm::ThreadblockShape::kK) {
continue;
}
}
cutlass::gemm::GemmCoord problem_size(m, n, k);
TestbedUniversal<Gemm, Relu> testbed;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
}
/*
// large problem with high coverage
for (int split_k_slices = 1; split_k_slices <= 3; ++split_k_slices) {
TestbedUniversal<Gemm> testbed;
cutlass::gemm::GemmCoord problem_size(72, 56, 8192);
passed = testbed.run(
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size,
split_k_slices,
cutlass::from_real<ElementCompute>(1.0),
cutlass::from_real<ElementCompute>(2.0)
);
if (!passed) {
break;
}
}
*/
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_universal.h/0 | {
"file_path": "test/unit/gemm/device/testbed_universal.h",
"repo_id": "test",
"token_count": 7089
} | 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "testbed.h"
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
////////////////////////////////////////////////////////////////////////////////
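// Naming convention for the tests below (reading of the instantiations, not separate
// documentation): OverallTileShape_WarpShape_InstructionShape, e.g. 128x128x32_64x64x32_16x8x16,
// where the overall tile shape is what the Testbed is instantiated with. The crosswise parameter
// of the shared-memory operand layouts (32 or 64 here) matches the K extent of that tile in elements.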
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_64x64x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_64x32x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_32x32x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_32x16x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 16, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_16x16x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<16, 16, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_64x32x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_32x32x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_32x16x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_16x16x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<16, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
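// tfloat32_t operands with float accumulation and the 16x8x8 instruction; crosswise layouts with
// crosswise sizes 16 and 32.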
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_64x64x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_64x32x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_32x32x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_32x16x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_16x16x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_64x64x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_64x32x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_32x32x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_32x16x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 16, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_16x16x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<16, 16, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
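// half_t operands staged in congruous TensorOp layouts (column-major A, row-major B) with float
// accumulation and the 16x8x16 instruction.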
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x32_64x64x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x32_32x32x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 16x16x32_16x16x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<16, 16, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<16, 16, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 32x32x32_32x32x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x64_32x32x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
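// tfloat32_t operands in congruous layouts with float accumulation and the 16x8x8 instruction.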
TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x16_64x64x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x16_32x32x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x32_64x64x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x32_32x32x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
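// TN and NT cases that feed float operands to the tf32 16x8x8 instruction. These use
// TransformTestbed, which exercises the operand fragment transform (rounding float to tf32)
// before issuing the mma.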
TEST(SM80_warp_gemm_tensor_op_tn, tf32_round_128x128x32_64x64x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = float;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_nt, tf32_round_128x128x32_64x64x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = float;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
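// int8_t operands with 32-element crosswise layouts, int accumulation, and saturating
// multiply-add (OpMultiplyAddSaturate); both the 16x8x16 and 16x8x32 instruction shapes are covered.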
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_16x16x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<16, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x16x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x32x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x32x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_16x16x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<16, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x16x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x32x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x32x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x64x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
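// int8_t operands with larger crosswise sizes (64 and 128), int accumulation, saturating
// multiply-add, and the 16x8x32 instruction.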
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_64x64x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_64x32x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_32x32x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_32x16x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_16x16x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<16, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_64x64x128_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_64x32x128_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_32x32x128_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_32x16x128_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_16x16x128_16x8x32) {
using Shape = cutlass::gemm::GemmShape<16, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
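// int4b_t operands with crosswise sizes 128 and 256, int accumulation, saturating multiply-add,
// and the 16x8x64 instruction.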
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_64x64x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_64x32x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_32x32x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_32x16x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_16x16x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<16, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_64x64x256_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 256> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_64x32x256_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 32, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 256> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_32x32x256_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 32, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 256> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_32x16x256_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 16, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 256> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_16x16x256_16x8x64) {
using Shape = cutlass::gemm::GemmShape<16, 16, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 256> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
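// 1-bit (uint1b_t) operands with crosswise sizes 512 and 1024, int accumulation, and the
// 16x8x256 instruction using the OpMultiplyAdd operator tag.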
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_64x64x512_16x8x256) {
using Shape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 512> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_64x32x512_16x8x256) {
using Shape = cutlass::gemm::GemmShape<64, 32, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 512> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_32x32x512_16x8x256) {
using Shape = cutlass::gemm::GemmShape<32, 32, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 512> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_32x16x512_16x8x256) {
using Shape = cutlass::gemm::GemmShape<32, 16, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 512> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_16x16x512_16x8x256) {
using Shape = cutlass::gemm::GemmShape<16, 16, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 512> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_64x64x1024_16x8x256) {
using Shape = cutlass::gemm::GemmShape<64, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 1024> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_64x32x1024_16x8x256) {
using Shape = cutlass::gemm::GemmShape<64, 32, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 1024> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_32x32x1024_16x8x256) {
using Shape = cutlass::gemm::GemmShape<32, 32, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 1024> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_32x16x1024_16x8x256) {
using Shape = cutlass::gemm::GemmShape<32, 16, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 1024> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_16x16x1024_16x8x256) {
using Shape = cutlass::gemm::GemmShape<16, 16, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 1024> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f64, 16x16x4_16x16x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<16, 16, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<16, 16, 4> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f64, 32x16x4_32x16x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 16, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 16, 4> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f64, 32x32x4_32x32x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 4> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f64, 32x64x4_32x64x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 64, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 64, 4> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 16x16x16_16x16x16_8x8x4) {
using Shape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<16, 16, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 32x32x16_32x32x16_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 64x32x16_64x32x16_8x8x4) {
using Shape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<64, 32, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 32x64x16_32x64x16_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 64, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_16x16x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<16, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_32x16x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_32x32x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_64x32x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_64x64x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_canonical_f64_row_col, 32x32x8_32x32x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_canonical_f64_col_row, 32x32x8_32x32x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_canonical_tf32_row_col, 32x32x16_32x32x8_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_canonical_tf32_col_row, 32x32x16_32x32x8_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
#endif // if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
| test/unit/gemm/warp/gemm_sm80.cu/0 | {
"file_path": "test/unit/gemm/warp/gemm_sm80.cu",
"repo_id": "test",
"token_count": 32133
} | 62 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
typedef char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t;
typedef unsigned short uint16_t;
typedef int int32_t;
typedef unsigned int uint32_t;
typedef long long int int64_t;
typedef unsigned long long int uint64_t;
#if defined __x86_64__ && !defined __ILP32__
# define __WORDSIZE 64
#else
# define __WORDSIZE 32
#endif
/* Small types. */
/* Signed. */
typedef signed char int_least8_t;
typedef short int int_least16_t;
typedef int int_least32_t;
#if __WORDSIZE == 64
typedef long int int_least64_t;
#else
__extension__
typedef long long int int_least64_t;
#endif
/* Unsigned. */
typedef unsigned char uint_least8_t;
typedef unsigned short int uint_least16_t;
typedef unsigned int uint_least32_t;
#if __WORDSIZE == 64
typedef unsigned long int uint_least64_t;
#else
__extension__
typedef unsigned long long int uint_least64_t;
#endif
/* Fast types. */
/* Signed. */
typedef signed char int_fast8_t;
#if __WORDSIZE == 64
typedef long int int_fast16_t;
typedef long int int_fast32_t;
typedef long int int_fast64_t;
#else
typedef int int_fast16_t;
typedef int int_fast32_t;
__extension__
typedef long long int int_fast64_t;
#endif
/* Unsigned. */
typedef unsigned char uint_fast8_t;
#if __WORDSIZE == 64
typedef unsigned long int uint_fast16_t;
typedef unsigned long int uint_fast32_t;
typedef unsigned long int uint_fast64_t;
#else
typedef unsigned int uint_fast16_t;
typedef unsigned int uint_fast32_t;
__extension__
typedef unsigned long long int uint_fast64_t;
#endif
/* Types for `void *' pointers. */
#if __WORDSIZE == 64
# ifndef __intptr_t_defined
typedef long int intptr_t;
# define __intptr_t_defined
# endif
typedef unsigned long int uintptr_t;
#else
# ifndef __intptr_t_defined
typedef int intptr_t;
# define __intptr_t_defined
# endif
typedef unsigned int uintptr_t;
#endif
/* Largest integral types. */
#if __WORDSIZE == 64
typedef long int intmax_t;
typedef unsigned long int uintmax_t;
#else
__extension__
typedef long long int intmax_t;
__extension__
typedef unsigned long long int uintmax_t;
#endif
| test/unit/nvrtc/stdlib/stdint.h/0 | {
"file_path": "test/unit/nvrtc/stdlib/stdint.h",
"repo_id": "test",
"token_count": 1610
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for TensorReduce family of device-wide operators
*/
#include <iostream>
#include <limits>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This reduces the C dimension, transforming an NHWC tensor into NHWC with C=1.
template <typename TensorReduction, typename ElementCompute = typename TensorReduction::ElementCompute>
bool TestAllReduction_NHWC_reduce_c(ElementCompute reduction_identity = ElementCompute()) {
using Layout = typename TensorReduction::Layout;
using ElementOutput = typename TensorReduction::ElementOutput;
using ElementSource = typename TensorReduction::ElementSource;
int const kV = TensorReduction::kVectorLength;
int const N_indices[] = {3, 13};
int const H_indices[] = {5, 17};
int const W_indices[] = {7, 19};
int const C_indices[] = {2049, 2048, 2047, 384, 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1};
for (int N : N_indices) {
for (int H : H_indices) {
for (int W : W_indices) {
for (int Cx : C_indices) {
int C = Cx * kV;
cutlass::HostTensor<ElementSource, Layout> src_tensor({N, H, W, C});
cutlass::HostTensor<ElementOutput, Layout> dst_tensor({N, H, W, 1});
cutlass::reference::host::TensorFillRandomUniform(
src_tensor.host_view(), 17, 10, -10, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
// Execute a tensor reduction over rank 3 (the 'C' dimension is reduced; NHWC => NHW)
TensorReduction reduction(src_tensor.extent(), 3);
cutlass::DeviceAllocation<uint8_t> device_workspace(reduction.workspace_size());
cutlass::Status status = reduction.reduce(
dst_tensor.device_ref(),
src_tensor.device_ref(),
device_workspace.get(),
reduction_identity
);
EXPECT_EQ(status, cutlass::Status::kSuccess);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
dst_tensor.sync_host();
typename TensorReduction::ReductionOp reduction_op;
//
// Reference check
//
for (int n = 0; n < src_tensor.extent().n(); ++n) {
for (int h = 0; h < src_tensor.extent().h(); ++h) {
for (int w = 0; w < src_tensor.extent().w(); ++w) {
ElementCompute c_accum = reduction_identity;
for (int c = 0; c < src_tensor.extent().c(); ++c) {
c_accum = reduction_op(c_accum, ElementCompute(src_tensor.at({n, h, w, c})));
}
ElementCompute got = ElementCompute(dst_tensor.at({n, h, w, 0}));
bool equal = (c_accum == got);
EXPECT_TRUE(equal);
if (!equal) {
std::cerr
<< "Error at location (" << n << ", " << h << ", " << w << ", 0)" << std::endl;
std::cerr
<< " expected: " << c_accum << std::endl
<< " got: " << got << std::endl;
std::cerr
<< "Problem: " << src_tensor.extent() << " -> "
<< dst_tensor.extent() << std::endl;
std::cerr
<< " Grid: " << reduction.reduction_strided.grid_shape
<< "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl
<< " FInal: " << reduction.reduction_strided.grid_final
<< "\n Block: " << reduction.reduction_strided.threadblock_final << "\n";
return false;
}
} //w
} // h
} // n
//
// Next problem
//
} // C
} // W
} // H
} // N
return true;
}
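// Note on the reduction_identity argument: it must be the neutral element of
// the chosen reduction functor. For reference, the tests below pass:
//
//   cutlass::plus<T>         -> T()   (the default)
//   cutlass::maximum<float>  -> -std::numeric_limits<float>::max()
//   cutlass::minimum<float>  ->  std::numeric_limits<float>::max()
//   cutlass::logical_or<T>   -> T(0)
//   cutlass::logical_and<T>  -> T(1)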
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1_f16x1) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 2;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2_f16x2) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 2;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4_f16x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_maximum_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::maximum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( -std::numeric_limits<float>::max() ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_minimum_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::minimum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( std::numeric_limits<float>::max() ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ANY_c_s32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(0) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_s32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ANY_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(0) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/reduction/device/tensor_reduce_contiguous.cu/0 | {
"file_path": "test/unit/reduction/device/tensor_reduce_contiguous.cu",
"repo_id": "test",
"token_count": 5294
} | 64 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for all CONV operation kinds in CUTLASS Library.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
#include "cutlass/conv/convnd_problem_shape.hpp"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/trace.h"
#include <utility>
#include <variant>
#if defined(CUTLASS_DEBUG_TRACE_LEVEL)
#include <sstream>
#endif
namespace cutlass::library {
namespace detail {
template<class ValueType, size_t ... Indices>
constexpr cute::array<ValueType, 1u + sizeof...(Indices)>
vector_to_array_strides_helper(const std::vector<ValueType>& v,
std::index_sequence<Indices...>)
{
return {v[(sizeof...(Indices) - 1u) - Indices]..., ValueType(1)};
}
template<class ValueType, size_t Size>
cute::array<ValueType, Size>
vector_to_array_strides(const std::vector<ValueType>& v, std::integral_constant<size_t, Size>)
{
static_assert(Size != 0);
CUTLASS_ASSERT(v.size() + 1u == Size);
return vector_to_array_strides_helper(v, std::make_index_sequence<Size - 1u>{});
}
template<class Index, class LongIndex, size_t ... Indices>
constexpr cute::array<int64_t, 1u + sizeof...(Indices)>
coord_to_array_strides_helper(
const ::cutlass::Coord<int(sizeof...(Indices)), Index, LongIndex> coord,
std::index_sequence<Indices...>)
{
return {int64_t(coord[(sizeof...(Indices) - 1u) - Indices])..., int64_t(1)};
}
template<int Rank, class Index, class LongIndex>
cute::array<int64_t, 1u + size_t(Rank)>
coord_to_array_strides(const ::cutlass::Coord<Rank, Index, LongIndex>& coord)
{
static_assert(Rank >= 0);
return coord_to_array_strides_helper(coord, std::make_index_sequence<Rank>{});
}
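// Illustration (not library code): for a packed NHWC activation of extent
// (N, H, W, C), Conv2dConfiguration carries the three TensorNHWC strides as
// stride_a = {C, W*C, H*W*C}. The helpers above reverse that order and append
// a unit stride, yielding the outermost-to-innermost array expected by
// ConvProblemShape. A minimal sketch, assuming packed strides (variable names
// here are illustrative only):
//
//   std::vector<int64_t> stride_a = {C, W*C, H*W*C};
//   cute::array<int64_t, 4> s =
//       vector_to_array_strides(stride_a, std::integral_constant<size_t, 4>{});
//   // s == {H*W*C, W*C, C, 1}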
} // namespace detail
// Tells the profiler about CUTLASS 3's 2-D and 3-D convolutions.
// For CUTLASS 2's 2-D convolutions, see Conv2dOperation.
// For CUTLASS 2's 3-D convolutions, see Conv3dOperation.
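// Hypothetical registration sketch (the real code is emitted by the library
// generator; the kernel type and operation name below are placeholders):
//
//   // using Operation = cutlass::conv::device::ConvUniversalAdapter<SomeConvKernel>;
//   // manifest.append(new ConvOperation3x<Operation>("cutlass3x_..._fprop"));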
template<class Operator_>
class ConvOperation3x : public Operation {
public:
using Operator = Operator_;
static_assert(Operator::NumSpatialDimensions == 2 ||
Operator::NumSpatialDimensions == 3,
"The profiler currently only supports convolutions with 2 or 3 spatial dimensions.");
using LayoutA = cute::conditional_t<Operator::NumSpatialDimensions == 3,
cutlass::layout::TensorNDHWC,
cute::conditional_t<Operator::NumSpatialDimensions == 2,
cutlass::layout::TensorNHWC,
cutlass::layout::TensorNWC>
>;
using LayoutB = LayoutA;
using LayoutC = LayoutA;
using ElementA = typename Operator::ElementA;
using ElementB = typename Operator::ElementB;
using ElementC = typename Operator::ElementC;
using ElementD = typename Operator::ElementD;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator;
ConvOperation3x(const char* name = "unknown_cutlass_3_conv") {
// Initialize OperationDescription (the base class)
description_.name = name;
description_.provider = Provider::kCUTLASS;
if constexpr (Operator::NumSpatialDimensions == 2) {
description_.kind = OperationKind::kConv2d;
}
else if constexpr (Operator::NumSpatialDimensions == 3) {
description_.kind = OperationKind::kConv3d;
}
else {
static_assert(::cutlass::detail::dependent_false<Operator>,
"This class currently only supports 2-D and 3-D convolutions.");
}
description_.tile_description.threadblock_shape = make_Coord(
Operator::ThreadblockShape::kM,
Operator::ThreadblockShape::kN,
Operator::ThreadblockShape::kK);
description_.tile_description.threadblock_stages = Operator::kStages;
description_.tile_description.warp_count = make_Coord(
Operator::WarpCount::kM,
Operator::WarpCount::kN,
Operator::WarpCount::kK);
description_.tile_description.math_instruction.instruction_shape = make_Coord(
Operator::InstructionShape::kM,
Operator::InstructionShape::kN,
Operator::InstructionShape::kK);
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class =
OpcodeClassMap<typename Operator::OperatorClass>::kId;
description_.tile_description.math_instruction.math_operation =
MathOperationID::kMultiplyAdd;
description_.tile_description.minimum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin;
description_.tile_description.maximum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax;
// Initialize ConvDescription (the subclass)
// kConvDim does not exist in Operator for CUTLASS 3 convolutions.
// For CUTLASS 2 convolutions, it is the number of spatial dimensions.
description_.conv_dim = Operator::NumSpatialDimensions;
description_.conv_kind = ConvKindMap<kConvolutionalOperator>::kId;
description_.iterator_algorithm = {};
description_.A = make_TensorDescription<ElementA, LayoutA>();
description_.B = make_TensorDescription<ElementB, LayoutB>();
description_.C = make_TensorDescription<ElementC, LayoutC>();
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
}
~ConvOperation3x() override = default;
OperationDescription const& description() const override {
return static_cast<OperationDescription const&>(description_);
}
private:
Status update_operator_arguments_from_configuration_2d_or_3d(
typename Operator::Arguments& out_args,
void const* configuration) const {
Status status = Status::kInvalid;
CUTLASS_ASSERT(configuration != nullptr);
if constexpr (Operator::NumSpatialDimensions == 2) {
CUTLASS_ASSERT(description_.kind == OperationKind::kConv2d);
// tools/library/include/cutlass/library/library.h
// defines Conv2dConfiguration.
// tools/profiler/include/cutlass/profiler/conv2d_operation_profiler.h
// uses Conv2dConfiguration.
auto* conf_ptr = reinterpret_cast<Conv2dConfiguration const*>(configuration);
status = update_operator_arguments_from_configuration(out_args, *conf_ptr);
}
else if constexpr (Operator::NumSpatialDimensions == 3) {
CUTLASS_ASSERT(description_.kind == OperationKind::kConv3d);
auto* conf_ptr = reinterpret_cast<Conv3dConfiguration const*>(configuration);
status = update_operator_arguments_from_configuration(out_args, *conf_ptr);
}
else {
static_assert(::cutlass::detail::dependent_false<Operator>,
"This class currently only supports 2-D and 3-D convolutions.");
}
return status;
}
public:
Status can_implement(
void const* configuration,
void const* arguments) const override {
Status status = Status::kInvalid;
// gemm_operation_3x.hpp accesses "configuration" as
// GemmUniversalConfiguration (which lives in
// tools/library/include/cutlass/library/library.h) and
// "arguments" as GemmUniversalArguments (which lives in
// tools/library/include/cutlass/library/library.h).
// Those things don't apply to convolutions.
// Despite the existence of ConvUniversal, there's no
// corresponding "ConvUniversalConfiguration" or
// "ConvUniversalArguments."
CUTLASS_ASSERT(configuration != nullptr);
CUTLASS_ASSERT(arguments != nullptr);
typename Operator::Arguments out_args{};
status = update_operator_arguments_from_configuration_2d_or_3d(out_args, configuration);
if (status != Status::kSuccess) {
return status;
}
auto* in_args_ptr = reinterpret_cast<ConvArguments const*>(arguments);
status = update_operator_arguments_from_arguments(out_args, *in_args_ptr);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(out_args);
}
uint64_t get_host_workspace_size(void const* /* configuration */) const override {
return sizeof(Operator);
}
uint64_t get_device_workspace_size(
void const* configuration,
void const* arguments = nullptr) const override
{
// This presumes that at least one of configuration or arguments is nonnull.
Status status = Status::kInvalid;
// gemm_operation_3x.hpp has get_device_workspace_size return 0 on
// error. It's not clear that this is what we want -- perhaps we
// should return something like expected<uint64_t, Status>? -- but
// it's the only option that preserves the current interface.
constexpr uint64_t error_indication = 0;
typename Operator::Arguments out_args{};
if (configuration != nullptr) {
status = update_operator_arguments_from_configuration_2d_or_3d(out_args, configuration);
if (status != Status::kSuccess) {
return error_indication;
}
}
if (arguments != nullptr) {
auto* in_args_ptr = reinterpret_cast<ConvArguments const*>(arguments);
status = update_operator_arguments_from_arguments(out_args, *in_args_ptr);
if (status != Status::kSuccess) {
return error_indication;
}
}
if (status == Status::kSuccess) {
return static_cast<uint64_t>(Operator::get_workspace_size(out_args));
}
else {
return error_indication;
}
}
Status initialize(
void const* configuration,
void* host_workspace,
void* /* device_workspace */ = nullptr,
cudaStream_t stream = nullptr) const override
{
Status status = Status::kInvalid;
if (configuration == nullptr) {
CUTLASS_TRACE_HOST("Input configuration is null.");
return Status::kInvalid;
}
typename Operator::Arguments out_args{};
status = update_operator_arguments_from_configuration_2d_or_3d(out_args, configuration);
if (status != Status::kSuccess) {
// Any kind of failure invalidates the last successful configuration.
clear_last_successful_config();
return status;
}
else {
set_last_successful_config(configuration);
}
if (host_workspace == nullptr) {
CUTLASS_TRACE_HOST("host_workspace is null.");
return Status::kInvalid;
}
(void) new (host_workspace) Operator;
return status;
// CUTLASS 2 convolutions call the Operator's initialize function
// here, like this.
//
//return op->initialize(args, device_workspace, stream);
//
// CUTLASS 3 convolutions (ConvUniversal), like CUTLASS 3 Gemms
// (GemmUniversal), lack an "initialize" member function.
}
Status run(
void const* arguments,
void* host_workspace,
void* device_workspace = nullptr,
cudaStream_t stream = nullptr) const override
{
auto status = Status::kInvalid;
// The Operator doesn't appear to save the last configuration (it
// doesn't have a way to do that, since it lacks an initialize()
// member function), so we have to use the stored configuration
// from the last successful initialize() call (if any).
typename Operator::Arguments out_args{};
status = update_operator_arguments_from_stored_configuration(out_args);
if (status != Status::kSuccess) {
CUTLASS_TRACE_HOST("Updating from previous successful configuration failed.");
return status;
}
if (arguments == nullptr) {
CUTLASS_TRACE_HOST("Input argument 'arguments' is null.");
return Status::kInvalid;
}
auto* in_args_ptr = reinterpret_cast<ConvArguments const*>(arguments);
status = update_operator_arguments_from_arguments(out_args, *in_args_ptr);
if (status != Status::kSuccess) {
return status;
}
auto* op = reinterpret_cast<Operator*>(host_workspace);
return op->run(out_args, device_workspace, stream);
}
private:
ConvDescription description_;
// Result of initialize() calling
// update_operator_arguments_from_configuration() successfully.
// This is needed because run() doesn't take a configuration, just
// arguments, and the kernel doesn't appear to save the
// configuration from the last initialize() call.
//
// Unfortunately, this must be declared mutable, because it must be
// set in initialize(), and initialize() is inherited as const.
mutable std::variant<
std::monostate,
Conv2dConfiguration,
Conv3dConfiguration> last_successful_config_{std::monostate{}};
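// Intended call sequence (sketch only; the object and argument names below
// are placeholders): initialize() caches the configuration in this variant,
// and a later run() rebuilds the kernel arguments from it via std::visit.
//
//   op.initialize(&conv2d_configuration, host_workspace);
//   op.run(&conv_arguments, host_workspace, device_workspace, stream);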
// Clear the last configuration resulting from a successful initialize() call.
//
// Unfortunately, this must be declared const, because initialize() is.
void clear_last_successful_config() const {
last_successful_config_ = std::monostate{};
}
// Set the last configuration resulting from a successful initialize() call.
//
// Unfortunately, this must be declared const, because initialize() is.
void set_last_successful_config(void const* configuration) const {
CUTLASS_ASSERT(configuration != nullptr);
if constexpr (Operator::NumSpatialDimensions == 2) {
CUTLASS_ASSERT(description_.kind == OperationKind::kConv2d);
auto* conf_ptr = reinterpret_cast<Conv2dConfiguration const*>(configuration);
last_successful_config_ = *conf_ptr;
} else if constexpr (Operator::NumSpatialDimensions == 3) {
CUTLASS_ASSERT(description_.kind == OperationKind::kConv3d);
auto* conf_ptr = reinterpret_cast<Conv3dConfiguration const*>(configuration);
last_successful_config_ = *conf_ptr;
}
else {
static_assert(::cutlass::detail::dependent_false<Operator>,
"This class currently only supports 2-D and 3-D convolutions.");
}
}
// Whether a configuration from a successful initialize() call exists.
bool last_successful_config_exists() const {
return not std::holds_alternative<std::monostate>(last_successful_config_);
}
// Visitor for update_operator_arguments_from_stored_configuration.
struct ConfigurationVisitor {
typename Operator::Arguments& out_args;
Status operator() (std::monostate const&) const {
CUTLASS_TRACE_HOST("No successful previous configuration exists. "
"One cause is calling run() before a successful initialize() call.");
return Status::kInvalid;
}
Status operator() (Conv2dConfiguration const& conf2d) const {
return update_operator_arguments_from_configuration(out_args, conf2d);
}
Status operator() (Conv3dConfiguration const& conf3d) const {
return update_operator_arguments_from_configuration(out_args, conf3d);
}
};
// Like update_operator_arguments_from_configuration, but on the
// stored configuration from the last successful initialize() call,
// if any. If there was no last successful initialize() call,
// then return Status::kInvalid.
//
// Unfortunately, this must be declared const, because run() is.
Status update_operator_arguments_from_stored_configuration(
typename Operator::Arguments& out_args) const
{
return std::visit(ConfigurationVisitor{out_args}, last_successful_config_);
}
template<class FusionArgs, class = void>
struct UpdateFusionArgs {
static Status update_(
FusionArgs const&,
ConvArguments const&)
{
// For custom EVT, it is the user's responsibility to ensure
// that alpha and beta are updated appropriately.
return Status::kSuccess;
}
};
template<class FusionArgs>
struct UpdateFusionArgs<FusionArgs, cute::void_t<decltype(FusionArgs{}.alpha)>> {
static Status update_(
FusionArgs& fusion_args,
ConvArguments const& arguments)
{
if (arguments.pointer_mode == ScalarPointerMode::kHost) {
fusion_args.alpha = *static_cast<ElementCompute const *>(arguments.alpha);
fusion_args.beta = *static_cast<ElementCompute const *>(arguments.beta);
fusion_args.alpha_ptr = nullptr;
fusion_args.beta_ptr = nullptr;
return Status::kSuccess;
}
else if (arguments.pointer_mode == ScalarPointerMode::kDevice) {
fusion_args.alpha = 0;
fusion_args.beta = 0;
fusion_args.alpha_ptr = static_cast<ElementCompute const *>(arguments.alpha);
fusion_args.beta_ptr = static_cast<ElementCompute const *>(arguments.beta);
return Status::kSuccess;
}
else {
return Status::kErrorInvalidProblem;
}
}
};
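// The two templates above implement the usual member-detection idiom: the
// primary template is selected when FusionArgs has no member named alpha,
// and the partial specialization when it does. A self-contained sketch of
// the same idiom (illustrative only, written with the std:: equivalents):
//
//   template <class T, class = void>
//   struct has_alpha : std::false_type {};
//
//   template <class T>
//   struct has_alpha<T, std::void_t<decltype(T{}.alpha)>> : std::true_type {};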
static Status update_operator_arguments_from_configuration(
typename Operator::Arguments& out_args,
Conv2dConfiguration const& config)
{
using detail::vector_to_array_strides;
constexpr int num_spatial_dims = Operator::NumSpatialDimensions;
if constexpr (num_spatial_dims != 2) {
CUTLASS_TRACE_HOST("You can only use Conv2dConfiguration "
"with an Operator whose NumSpatialDimensions is exactly 2.");
return Status::kInvalid;
}
else {
// Convolutions split the metadata (in Conv2dConfiguration) from
// the data (ConvArguments, which only has pointers and a single
// enum value). Thus, this class will need both the
// configuration and the (user's input) arguments to set up the
// kernel's arguments. This function can fill in what the
// configuration has now, but the class will need the user's
// input arguments later.
if (config.split_k_mode != conv::SplitKMode::kSerial) {
CUTLASS_TRACE_HOST("CUTLASS 3 convolutions currently only support split_k_mode = kSerial.");
return Status::kInvalid;
}
// config.problem_size.split_k_slices is only meaningful if
// split_k_mode != kSerial. If this code later supports other
// split_k_mode values, then it will also need to read
// split_k_slices.
const int N = config.problem_size.N;
const int H = config.problem_size.H;
const int W = config.problem_size.W;
const int C = config.problem_size.C;
const int K = config.problem_size.K;
const int R = config.problem_size.R;
const int S = config.problem_size.S;
const int pad_h = config.problem_size.pad_h;
const int pad_w = config.problem_size.pad_w;
const int traversal_stride_h = config.problem_size.stride_h;
const int traversal_stride_w = config.problem_size.stride_w;
const int dilation_h = config.problem_size.dilation_h;
const int dilation_w = config.problem_size.dilation_w;
// CUTLASS 3's implicit GEMM convolution kernels currently only
// support cross correlation (traversing the activation and
// filter tensors in the same order). Support for
// cutlass::conv::Mode::kConvolution is future work.
const auto mode = config.problem_size.mode;
if (mode != cutlass::conv::Mode::kCrossCorrelation) {
CUTLASS_TRACE_HOST("Convolution modes other than kCrossCorrelation "
"are not currently supported.");
return Status::kInvalid;
}
constexpr int num_spatial_dims = Operator::NumSpatialDimensions;
constexpr size_t stride_size = size_t(num_spatial_dims) + 2u;
constexpr auto the_stride_size = std::integral_constant<size_t, stride_size>{};
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
std::cerr << " num_spatial_dims = " << num_spatial_dims << "\n"
<< " stride_size = " << stride_size << "\n";
auto print_stride = [] (auto const& stride, char const variable_name[]) {
std::cerr << " " << variable_name << ": [";
for (size_t k = 0; k < stride.size(); ++k) {
std::cerr << stride[k];
if (k + 1u < stride.size()) {
std::cerr << ", ";
}
}
std::cerr << "]\n";
};
print_stride(config.stride_a, "config.stride_a");
print_stride(config.stride_b, "config.stride_b");
print_stride(config.stride_c, "config.stride_c");
#endif
// Conv2dConfiguration stores the strides as std::vector,
// so the code needs to check the run-time vector lengths.
if (config.stride_a.size() + 1u != stride_size) {
#if defined(CUTLASS_DEBUG_TRACE_LEVEL)
std::ostringstream os;
os << "config.stride_a.size() + 1u = "
<< (config.stride_a.size() + 1u)
<< " != num_spatial_dims + 2u = " << stride_size;
CUTLASS_TRACE_HOST( os.str() );
#endif
return Status::kInvalid;
}
if (config.stride_b.size() + 1u != stride_size) {
#if defined(CUTLASS_DEBUG_TRACE_LEVEL)
std::ostringstream os;
os << "config.stride_b.size() + 1u = "
<< (config.stride_b.size() + 1u)
<< " != num_spatial_dims + 2u = " << stride_size;
CUTLASS_TRACE_HOST( os.str() );
#endif
return Status::kInvalid;
}
if (config.stride_c.size() + 1u != stride_size) {
#if defined(CUTLASS_DEBUG_TRACE_LEVEL)
std::ostringstream os;
os << "config.stride_c.size() + 1u = "
<< (config.stride_c.size() + 1u)
<< " != num_spatial_dims + 2u = " << stride_size;
CUTLASS_TRACE_HOST( os.str() );
#endif
return Status::kInvalid;
}
constexpr cutlass::conv::Operator conv_op = Operator::DispatchPolicy::ConvOp;
using problem_shape_type =
cutlass::conv::ConvProblemShape<conv_op, num_spatial_dims>;
// cute::array<int64_t, RankT>; must convert to the kernel's native strides
using TensorStride = typename problem_shape_type::TensorStride;
const TensorStride stride_A = vector_to_array_strides(config.stride_a, the_stride_size);
const TensorStride stride_B = vector_to_array_strides(config.stride_b, the_stride_size);
const TensorStride stride_C = vector_to_array_strides(config.stride_c, the_stride_size);
// cutlass::library::Conv2dConfiguration has no member stride_d.
// The code below imitates the testbed,
// which just sets D's strides to C's strides.
const TensorStride stride_D = stride_C;
const int num_groups = config.problem_size.groups;
if (num_groups != 1) {
CUTLASS_TRACE_HOST("CUTLASS 3 kernels currently only support groups = 1.");
return Status::kInvalid;
}
problem_shape_type problem_shape(
/* mode = */ mode,
/* shape_act = */ {N, H, W, C},
/* stride_act = */ stride_A,
/* shape_flt = */ {K, R, S, C},
/* stride_flt = */ stride_B,
/* lower_padding = */ {pad_h, pad_w},
/* upper_padding = */ {pad_h, pad_w},
/* traversal_stride = */ {traversal_stride_h, traversal_stride_w},
/* dilation = */ {dilation_h, dilation_w},
num_groups);
out_args.mainloop.problem_shape = problem_shape;
// ConvProblemShape's constructor sets its shape_C member.
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
std::cerr << " problem_shape:\n"
<< " shape_C: " << problem_shape.shape_C << "\n";
std::cerr << " stride_C: " << problem_shape.stride_C << "\n";
#endif
// Initialization of C's and D's strides follows the CUTLASS 3
// convolutions testbed (test/unit/conv/device_3x/testbed_conv.hpp).
{
using StrideC = typename Operator::ConvKernel::StrideC;
using StrideD = typename Operator::ConvKernel::StrideD;
auto stride_C = StrideC{};
auto stride_D = StrideD{};
if constexpr (conv_op == cutlass::conv::Operator::kWgrad) {
stride_C = cutlass::make_cute_packed_stride(
StrideC{}, problem_shape.shape_C, problem_shape.stride_C, conv_op);
stride_D = cutlass::make_cute_packed_stride(
StrideD{}, problem_shape.shape_C, problem_shape.stride_C, conv_op);
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
std::cerr << " Wgrad: stride_C: " << stride_C << "\n";
#endif
}
else {
cute::for_each(cute::make_seq<cute::rank<0>(StrideC{})>{}, [&](auto i) {
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
const auto stride_C_i = problem_shape.stride_C[problem_shape_type::RankT-2-i];
std::cerr << " Fprop or Dgrad: get<0, " << i << ">(stride_C): "
<< stride_C_i << "\n";
#endif
cute::get<0, i>(stride_C) = problem_shape.stride_C[problem_shape_type::RankT-2-i];
});
cute::for_each(cute::make_seq<cute::rank<0>(StrideD{})>{}, [&](auto i) {
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
const auto stride_D_i = problem_shape.stride_C[problem_shape_type::RankT-2-i];
std::cerr << " Fprop or Dgrad: get<0, " << i << ">(stride_D): "
<< stride_D_i << "\n";
#endif
cute::get<0, i>(stride_D) = problem_shape.stride_C[problem_shape_type::RankT-2-i];
});
}
out_args.epilogue.dC = stride_C;
out_args.epilogue.dD = stride_D;
}
return Status::kSuccess;
}
}
static Status update_operator_arguments_from_configuration(
typename Operator::Arguments& out_args,
Conv3dConfiguration const& config)
{
using detail::coord_to_array_strides;
constexpr int num_spatial_dims = Operator::NumSpatialDimensions;
if constexpr (num_spatial_dims != 3) {
CUTLASS_TRACE_HOST("You can only use Conv3dConfiguration "
"with an Operator whose NumSpatialDimensions is exactly 3.");
return Status::kInvalid;
}
else {
// Convolutions split the metadata (in Conv3dConfiguration) from
// the data (ConvArguments, which only has pointers and a single
// enum value). Thus, this class will need both the
// configuration and the (user's input) arguments to set up the
// kernel's arguments. This function can fill in what the
// configuration has now, but the class will need the user's
// input arguments later.
if (config.split_k_mode != conv::SplitKMode::kSerial) {
CUTLASS_TRACE_HOST("CUTLASS 3 convolutions currently only support split_k_mode = kSerial.");
return Status::kInvalid;
}
// config.problem_size.split_k_slices is only meaningful if
// split_k_mode != kSerial. If this code later supports other
// split_k_mode values, then it will also need to read
// split_k_slices.
const int N = config.problem_size.N;
const int D = config.problem_size.D;
const int H = config.problem_size.H;
const int W = config.problem_size.W;
const int C = config.problem_size.C;
const int K = config.problem_size.K;
const int T = config.problem_size.T;
const int R = config.problem_size.R;
const int S = config.problem_size.S;
const int pad_d = config.problem_size.pad_d;
const int pad_h = config.problem_size.pad_h;
const int pad_w = config.problem_size.pad_w;
const int traversal_stride_d = config.problem_size.stride_d;
const int traversal_stride_h = config.problem_size.stride_h;
const int traversal_stride_w = config.problem_size.stride_w;
const int dilation_d = config.problem_size.dilation_d;
const int dilation_h = config.problem_size.dilation_h;
const int dilation_w = config.problem_size.dilation_w;
// CUTLASS 3's implicit GEMM convolution kernels currently only
// support cross correlation (passing over the activation and
// filter tensors in the same order). The convolution mode is
// future work.
const auto mode = config.problem_size.mode;
if (mode != cutlass::conv::Mode::kCrossCorrelation) {
CUTLASS_TRACE_HOST("Convolution modes other than kCrossCorrelation "
"are not currently supported.");
return Status::kInvalid;
}
using Stride = cutlass::layout::TensorNDHWC::Stride;
static_assert(std::is_same_v<Stride, cutlass::Coord<4>>);
const cutlass::library::ConvKind conv_kind = [] () {
constexpr cutlass::conv::Operator op = Operator::DispatchPolicy::ConvOp;
if constexpr (op == cutlass::conv::Operator::kFprop) {
return library::ConvKind::kFprop;
}
else if constexpr (op == cutlass::conv::Operator::kDgrad) {
return library::ConvKind::kDgrad;
}
else /* if constexpr (op == cutlass::conv::Operator::kWgrad) */ {
return library::ConvKind::kWgrad;
}
} ();
const Stride input_stride_a = config.layout_a(conv_kind).stride();
const Stride input_stride_b = config.layout_b(conv_kind).stride();
const Stride input_stride_c = config.layout_c(conv_kind).stride();
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
constexpr size_t stride_size = size_t(num_spatial_dims) + 2u;
std::cerr << " num_spatial_dims = " << num_spatial_dims << "\n"
<< " stride_size = " << stride_size << "\n";
auto print_stride = [] (Stride const& stride, char const variable_name[]) {
std::cerr << " " << variable_name << ": [";
for (size_t k = 0; k < Stride::kRank; ++k) {
std::cerr << stride[static_cast<int>(k)];
if (k + 1u < Stride::kRank) {
std::cerr << ", ";
}
}
std::cerr << "]\n";
};
print_stride(input_stride_a, "input_stride_a");
print_stride(input_stride_b, "input_stride_b");
print_stride(input_stride_c, "input_stride_c");
#endif
constexpr cutlass::conv::Operator conv_op = Operator::DispatchPolicy::ConvOp;
using problem_shape_type =
cutlass::conv::ConvProblemShape<conv_op, num_spatial_dims>;
// cute::array<int64_t, RankT>; must convert to the kernel's native strides
using TensorStride = typename problem_shape_type::TensorStride;
const TensorStride stride_A = coord_to_array_strides(input_stride_a);
const TensorStride stride_B = coord_to_array_strides(input_stride_b);
const TensorStride stride_C = coord_to_array_strides(input_stride_c);
const TensorStride stride_D = stride_C;
const int num_groups = config.problem_size.groups;
if (num_groups != 1) {
CUTLASS_TRACE_HOST("CUTLASS 3 kernels currently only support groups = 1.");
return Status::kInvalid;
}
problem_shape_type problem_shape(
/* mode = */ mode,
/* shape_act = */ {N, D, H, W, C},
/* stride_act = */ stride_A,
/* shape_flt = */ {K, T, R, S, C},
/* stride_flt = */ stride_B,
/* lower_padding = */ {pad_d, pad_h, pad_w},
/* upper_padding = */ {pad_d, pad_h, pad_w},
/* traversal_stride = */ {traversal_stride_d, traversal_stride_h, traversal_stride_w},
/* dilation = */ {dilation_d, dilation_h, dilation_w},
num_groups);
out_args.mainloop.problem_shape = problem_shape;
// ConvProblemShape's constructor sets its shape_C member.
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
std::cerr << " problem_shape:\n"
<< " shape_C: " << problem_shape.shape_C << "\n";
std::cerr << " stride_C: " << problem_shape.stride_C << "\n";
#endif
{
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
std::cerr << " Compute stride_C and stride_D\n";
#endif
using StrideC = typename Operator::ConvKernel::StrideC;
using StrideD = typename Operator::ConvKernel::StrideD;
auto stride_C = StrideC{};
auto stride_D = StrideD{};
if constexpr (conv_op == cutlass::conv::Operator::kWgrad) {
stride_C = cutlass::make_cute_packed_stride(
StrideC{}, problem_shape.shape_C, problem_shape.stride_C, conv_op);
stride_D = cutlass::make_cute_packed_stride(
StrideD{}, problem_shape.shape_C, problem_shape.stride_C, conv_op);
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
std::cerr << " Wgrad: stride_C: " << stride_C << "\n";
#endif
}
else {
cute::for_each(cute::make_seq<cute::rank<0>(StrideC{})>{}, [&](auto i) {
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
const auto stride_C_i = problem_shape.stride_C[problem_shape_type::RankT-2-i];
std::cerr << " Fprop or Dgrad: get<0, " << i << ">(stride_C): "
<< stride_C_i << "\n";
#endif
cute::get<0, i>(stride_C) = problem_shape.stride_C[problem_shape_type::RankT-2-i];
});
cute::for_each(cute::make_seq<cute::rank<0>(StrideD{})>{}, [&](auto i) {
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
const auto stride_D_i = problem_shape.stride_C[problem_shape_type::RankT-2-i];
std::cerr << " Fprop or Dgrad: get<0, " << i << ">(stride_D): "
<< stride_D_i << "\n";
#endif
cute::get<0, i>(stride_D) = problem_shape.stride_C[problem_shape_type::RankT-2-i];
});
}
out_args.epilogue.dC = stride_C;
out_args.epilogue.dD = stride_D;
}
return Status::kSuccess;
}
}
Status update_operator_arguments_from_arguments(
typename Operator::Arguments& out_args,
ConvArguments const& in_args) const
{
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
std::cerr << "ConvOperation3x::update_operator_arguments_from_arguments\n";
#endif
out_args.mainloop.ptr_A = reinterpret_cast<ElementA const*>(in_args.A);
out_args.mainloop.ptr_B = reinterpret_cast<ElementB const*>(in_args.B);
out_args.epilogue.ptr_C = reinterpret_cast<ElementC const*>(in_args.C);
out_args.epilogue.ptr_D = reinterpret_cast<ElementD*>(in_args.D);
return Status::kSuccess;
}
};
} // namespace cutlass::library
| tools/library/src/conv_operation_3x.hpp/0 | {
"file_path": "tools/library/src/conv_operation_3x.hpp",
"repo_id": "tools",
"token_count": 13932
} | 65 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for all Symm operation kinds (Symm, Hemm)
in CUTLASS Library.
*/
#pragma once
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/symm.h"
#include "cutlass/gemm/kernel/default_symm_universal.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
#include "cutlass/core_io.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class SymmOperationBase : public Operation {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
static BlasMode const kBlasMode = Operator::kBlasMode;
static SideMode const kSideModeA = Operator::kSideModeA;
static FillMode const kFillModeA = Operator::kFillModeA;
using OperatorArguments = typename Operator::Arguments;
protected:
///
SymmDescription description_;
public:
/// Constructor
SymmOperationBase(char const *name = "unknown_symm") {
description_.name = name;
description_.provider = Provider::kCUTLASS;
description_.symm_kind = SymmKind::kUniversal;
description_.side_mode = kSideModeA;
description_.fill_mode = kFillModeA;
description_.blas_mode = kBlasMode;
description_.kind = OperationKind::kSymm;
description_.tile_description.threadblock_shape = make_Coord(
Operator::ThreadblockShape::kM,
Operator::ThreadblockShape::kN,
Operator::ThreadblockShape::kK);
description_.tile_description.threadblock_stages = Operator::kStages;
description_.tile_description.warp_count = make_Coord(
Operator::SymmKernel::WarpCount::kM,
Operator::SymmKernel::WarpCount::kN,
Operator::SymmKernel::WarpCount::kK);
description_.tile_description.math_instruction.instruction_shape = make_Coord(
Operator::InstructionShape::kM,
Operator::InstructionShape::kN,
Operator::InstructionShape::kK);
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class =
OpcodeClassMap<typename Operator::OperatorClass>::kId;
description_.tile_description.math_instruction.math_operation =
MathOperationMap<typename Operator::Operator>::kId;
description_.tile_description.minimum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin;
description_.tile_description.maximum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax;
description_.A = make_TensorDescription<ElementA, LayoutA>(Operator::kAlignmentA);
description_.B = make_TensorDescription<ElementB, LayoutB>(Operator::kAlignmentB);
description_.C = make_TensorDescription<ElementC, LayoutC>(Operator::kAlignmentC);
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.split_k_mode = SplitKMode::kNone;
}
/// Returns the description of the SYMM operation
virtual OperationDescription const & description() const {
return description_;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class SymmOperation : public SymmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
static BlasMode const kBlasMode = Operator::kBlasMode;
static SideMode const kSideModeA = Operator::kSideModeA;
static FillMode const kFillModeA = Operator::kFillModeA;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
SymmOperation(char const *name = "unknown_symm"):
SymmOperationBase<Operator_>(name) {
this->description_.symm_kind = SymmKind::kUniversal;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
SymmConfiguration const *configuration) {
//operator_args.mode = configuration->mode;
operator_args.problem_size = configuration->problem_size;
operator_args.batch_count = configuration->batch_count;
operator_args.lda = int(configuration->lda);
operator_args.ldb = int(configuration->ldb);
operator_args.ldc = int(configuration->ldc);
operator_args.ldd = int(configuration->ldd);
return Status::kSuccess;
}
/// Constructs the arguments structure given the configuration and arguments
static Status update_arguments_(
OperatorArguments &operator_args,
SymmArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
// update arguments
operator_args.ptr_A = arguments->A;
operator_args.ptr_B = arguments->B;
operator_args.ptr_C = arguments->C;
operator_args.ptr_D = arguments->D;
operator_args.batch_stride_A = arguments->batch_stride_A;
operator_args.batch_stride_B = arguments->batch_stride_B;
operator_args.batch_stride_C = arguments->batch_stride_C;
operator_args.batch_stride_D = arguments->batch_stride_D;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
SymmConfiguration const *configuration =
static_cast<SymmConfiguration const *>(configuration_ptr);
SymmArguments const *arguments =
static_cast<SymmArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace size
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace size
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<SymmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<SymmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
//std::cout << "initialize() library::SymmOperation" << std::endl;
//print_operator_args(args);
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<SymmArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
bool need_swapped_matrices = (kSideModeA == SideMode::kLeft &&
std::is_same<typename Operator::LayoutC, layout::ColumnMajor>::value) ||
(kSideModeA == SideMode::kRight &&
std::is_same<typename Operator::LayoutC, layout::RowMajor>::value);
if (need_swapped_matrices) {
status = op->update(args.swapped_matrices(), device_workspace);
} else {
status = op->update(args, device_workspace);
}
if (status != Status::kSuccess) {
return status;
}
//std::cout << "run() library::SymmOperation" << std::endl;
//print_operator_args(args);
status = op->run(stream);
return status;
}
/// Call print_operator_args from SymmOperation::initialize()
// to dump the arguments passed on to the CUTLASS operator for debugging
void print_operator_args(OperatorArguments &operator_args) const {
std::cout << "SymmOperation::OperatorArguments" << std::endl
<< " problem_size:" << std::endl
<< operator_args.problem_size << std::endl
<< " epilogue (alpha, beta): "
<< operator_args.epilogue.alpha << ", "
<< operator_args.epilogue.beta << std::endl
<< " ref_A (ptr, {stride}): "
<< operator_args.ptr_A << ", {"
<< operator_args.lda << "}" << std::endl
<< " ref_B (ptr, {stride}): "
<< operator_args.ptr_B << ", {"
<< operator_args.ldb << "}" << std::endl
<< " ref_C (ptr, {stride}): "
<< operator_args.ptr_C << ", {"
<< operator_args.ldc << "}" << std::endl
<< " ref_D (ptr, {stride}): "
<< operator_args.ptr_D << ", {"
<< operator_args.ldd << "}" << std::endl;
}
};
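// Illustrative call sequence (a sketch; in practice the profiler drives these virtual
// methods through the generic library::Operation interface rather than through
// SymmOperation directly, and `op`, `config`, `args`, and `device_workspace` are
// placeholder names):
//
//   std::vector<uint8_t> host_workspace(op.get_host_workspace_size(&config));
//   if (op.can_implement(&config, &args) == Status::kSuccess &&
//       op.initialize(&config, host_workspace.data(), device_workspace) == Status::kSuccess) {
//     op.run(&args, host_workspace.data(), device_workspace);
//   }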
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/symm_operation.h/0 | {
"file_path": "tools/library/src/symm_operation.h",
"repo_id": "tools",
"token_count": 4442
} | 66 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Command line options for performance test program
*/
#pragma once
#include <string>
#include <vector>
#include <map>
#include <cuda_runtime.h>
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/library/library.h"
#include "enumerated_types.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Global options
class Options {
public:
/// cuBLAS and cuDNN options
struct Library {
//
// Data members
//
/// Algorithm mode
AlgorithmMode algorithm_mode;
/// Algorithm enumerants
std::vector<int> algorithms;
//
// Methods
//
Library(CommandLine const &cmdline);
void print_usage(std::ostream &out) const;
void print_options(std::ostream &out, int indent = 0) const;
};
/// Options related to the selected device
struct Device {
/// Device ID
int device;
/// CUDA Device properties
cudaDeviceProp properties;
/// Total memory allocation on device
size_t maximum_capacity;
//
// Methods
//
Device(CommandLine const &cmdline);
void print_usage(std::ostream &out) const;
void print_options(std::ostream &out, int indent = 0) const;
void print_device_info(std::ostream &out) const;
/// Returns the compute capability of the listed device (e.g. 61, 60, 70, 75)
int compute_capability() const;
};
/// Options related to initializing input tensors
struct Initialization {
/// If true, data is initialized randomly. If false, no initialization is performed after
/// allocating tensors.
bool enabled;
/// If true, data distribution is set by the user and is not allowed to change
/// If false, data distribution is allowed to change based on element_type (library::NumericTypeID)
bool fix_data_distribution;
/// Data distribution for input tensors
Distribution data_distribution;
/// Source of random tensor elements
library::Provider provider;
/// Random number generator seed.
int seed;
//
// Methods
//
Initialization(CommandLine const &cmdline);
void print_usage(std::ostream &out) const;
void print_options(std::ostream &out, int indent = 0) const;
/// Helper to parse a Distribution object from the command line parser
static void get_distribution(
cutlass::CommandLine const &args,
std::string const &arg,
cutlass::Distribution &dist);
};
/// Options related to verification of the result
struct Verification {
//
// Data members
//
/// If true, kernels are verified before they are profiled
bool enabled;
/// If true, causes the profiler to return an error code if no reference check is run.
/// Only valid when verification is enabled.
bool required;
/// Relative error threshold - zero to require bit-level consistency
double epsilon;
/// Values smaller than this are assumed to be zero
double nonzero_floor;
/// List of providers used to verify each result
ProviderVector providers;
/// Indicates when to save the workspace
SaveWorkspace save_workspace;
//
// Methods
//
Verification(CommandLine const &cmdline);
void print_usage(std::ostream &out) const;
void print_options(std::ostream &out, int indent = 0) const;
/// Returns true if a provider is enabled
bool provider_enabled(library::Provider provider) const;
/// Returns the index of a provider if it is enabled
size_t index(library::Provider provider) const;
};
/// Options related to profiling
struct Profiling {
/// Number of workspaces to rotate through to avoid cache-resident working sets
int workspace_count;
/// Number of iterations to warmup each kernel prior to profiling
int warmup_iterations;
/// Number of iterations to profile each kernel - if 0, kernels are launched up to the profiling duration
int iterations;
/// Number of ms to sleep between profiling periods (ms)
int sleep_duration;
/// If true, profiling is actually conducted.
bool enabled;
/// If true, profiling returns an error code if no kernels are found to match the filters.
bool error_on_no_match = false;
/// List of providers of each functionality to be profiled
ProviderVector providers;
//
// Methods
//
Profiling(CommandLine const &cmdline);
void print_usage(std::ostream &out) const;
void print_options(std::ostream &out, int indent = 0) const;
/// Returns true if a provider is enabled
bool provider_enabled(library::Provider provider) const;
/// Returns the index of a provider if it is enabled
size_t index(library::Provider provider) const;
};
/// Options related to reporting
struct Report {
/// If true, result is appended to possibly existing file
bool append;
/// Path to a file containing results
std::string output_path;
/// Path to a file containing junit xml results
std::string junit_output_path;
/// Sequence of tags to attach to each result
std::vector<std::pair<std::string, std::string>> pivot_tags;
/// If true, reports status of all kernels including those that were
/// not run for the given arguments
bool report_not_run;
/// Prints human-readable text to stdout. If false, nothing is written to stdout
bool verbose;
/// Sort results (currently by flops-per-byte)
bool sort_results;
/// Prints the name of the kernel being profiled before running the kernel.
/// This is useful for determining which kernel is causing a run of the profiler to hang
bool print_kernel_before_running;
//
// Methods
//
Report(CommandLine const &cmdline);
void print_usage(std::ostream &out) const;
void print_options(std::ostream &out, int indent = 0) const;
};
/// Options related to printing usage and version information
struct About {
/// If true, usage is printed and the program ends.
bool help;
/// Prints version string
bool version;
/// Print information about devices
bool device_info;
//
// Methods
//
About(CommandLine const &cmdline);
void print_usage(std::ostream &out) const;
void print_options(std::ostream &out, int indent = 0) const;
static void print_version(std::ostream &out);
};
public:
//
// Data members
//
/// Top-level execution mode
ExecutionMode execution_mode;
/// Name of math function to profile
library::OperationKind operation_kind;
/// Vector of operation name substrings
std::vector<std::string> operation_names;
/// Vector of operation name substrings to exclude
std::vector<std::string> excluded_operation_names;
//
// Detailed configuration options
//
/// Configuration
CommandLine cmdline;
Device device;
Initialization initialization;
Library library;
Verification verification;
Profiling profiling;
Report report;
About about;
public:
Options(CommandLine const &cmdline);
void print_usage(std::ostream &out) const;
void print_options(std::ostream &out) const;
static std::string indent_str(int indent);
};
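// Illustrative usage (a sketch; `argc`/`argv` come from main() and only members declared
// above are referenced):
//
//   cutlass::CommandLine cmdline(argc, argv);   // argv as char const **
//   cutlass::profiler::Options options(cmdline);
//   if (options.about.help) {
//     options.print_usage(std::cout);
//   }
//   else if (options.report.verbose) {
//     options.print_options(std::cout);
//   }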
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| tools/profiler/include/cutlass/profiler/options.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/options.h",
"repo_id": "tools",
"token_count": 2703
} | 67 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief
*/
#include "cutlass/profiler/device_context.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates memory of a given type, capacity (elements), and name
DeviceAllocation *DeviceContext::allocate_block(
std::string const &name,
library::NumericTypeID type,
size_t capacity) {
device_memory_.emplace_back(type, capacity);
DeviceAllocation *allocation = &device_memory_.back();
allocations_[name] = allocation;
return allocation;
}
/// Allocates a tensor of a given type, layout, extent, and name
DeviceAllocation *DeviceContext::allocate_tensor(
std::string const &name,
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count) {
device_memory_.emplace_back(type, layout_id, extent, stride, batch_count);
DeviceAllocation *allocation = &device_memory_.back();
allocations_[name] = allocation;
return allocation;
}
/// Allocates a tensor of a given type, layout, extent, and name, and initializes it according to the given options
DeviceAllocation *DeviceContext::allocate_tensor(
Options const &options,
std::string const &name,
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count,
int seed_shift) {
DeviceAllocation *allocation =
allocate_tensor(name, type, layout_id, extent, stride, batch_count);
if (options.initialization.enabled) {
Distribution data_distribution = options.initialization.data_distribution;
// check if data distribution is allowed to change
if(!options.initialization.fix_data_distribution) {
// change data distribution based on bit width
switch(type) {
case library::NumericTypeID::kFE4M3:
data_distribution.set_uniform(-1, 1, 0);
break;
case library::NumericTypeID::kFE5M2:
data_distribution.set_uniform(-1, 1, 0);
break;
case library::NumericTypeID::kF16:
data_distribution.set_uniform(-3, 3, 0);
break;
case library::NumericTypeID::kB1:
data_distribution.set_uniform(0, 1, 0);
break;
case library::NumericTypeID::kS2:
data_distribution.set_uniform(-1, 1, 0);
break;
case library::NumericTypeID::kS4:
data_distribution.set_uniform(-2, 2, 0);
break;
case library::NumericTypeID::kU2:
data_distribution.set_uniform(0, 2, 0);
break;
case library::NumericTypeID::kU4:
data_distribution.set_uniform(0, 2, 0);
break;
case library::NumericTypeID::kS8:
data_distribution.set_uniform(-3, 3, 0);
break;
case library::NumericTypeID::kU8:
data_distribution.set_uniform(0, 4, 0);
break;
default: break;
}
}
// Override the pnz (percent nonzero) setting for the A/B/C tensors if a per-tensor value was provided for Gaussian distributions
if (data_distribution.kind == Distribution::Gaussian) {
double mean = data_distribution.gaussian.mean;
double stddev = data_distribution.gaussian.stddev;
int scale = data_distribution.int_scale;
if (name == "A" && data_distribution.gaussian.pnzA != 100.0) {
data_distribution.set_gaussian(mean, stddev, scale, data_distribution.gaussian.pnzA);
}
else if (name == "B" && data_distribution.gaussian.pnzB != 100.0) {
data_distribution.set_gaussian(mean, stddev, scale, data_distribution.gaussian.pnzB);
}
else if (name == "C" && data_distribution.gaussian.pnzC != 100.0) {
data_distribution.set_gaussian(mean, stddev, scale, data_distribution.gaussian.pnzC);
}
}
if (options.initialization.provider == library::Provider::kReferenceDevice) {
if (data_distribution.kind == Distribution::Sequential) {
allocation->initialize_sequential_device(
data_distribution);
}
else {
allocation->initialize_random_device(
options.initialization.seed + seed_shift,
data_distribution);
}
}
else if (options.initialization.provider == library::Provider::kReferenceHost) {
if (data_distribution.kind == Distribution::Sequential) {
allocation->initialize_sequential_host(
data_distribution);
}
else {
allocation->initialize_random_host(
options.initialization.seed + seed_shift,
data_distribution);
}
}
}
return allocation;
}
/// Allocates memory for sparse meta data
DeviceAllocation *DeviceContext::allocate_sparsemeta_tensor(
Options const &options,
std::string const &name,
library::NumericTypeID type,
library::LayoutTypeID layout_id,
library::NumericTypeID type_a,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count,
int seed_shift) {
DeviceAllocation *allocation =
allocate_tensor(name, type, layout_id, extent, stride, batch_count);
if (options.initialization.enabled) {
// TF32 has 4-bit metadata. The other types have 2-bit.
int MetaSizeInBits = (cutlass::library::sizeof_bits(type_a) == 32) ? 4 : 2;
if (options.initialization.provider == library::Provider::kReferenceDevice) {
allocation->initialize_random_sparsemeta_device(
options.initialization.seed + seed_shift,
MetaSizeInBits);
}
else if (options.initialization.provider == library::Provider::kReferenceHost) {
allocation->initialize_random_sparsemeta_host(
options.initialization.seed + seed_shift,
MetaSizeInBits);
}
}
return allocation;
}
/// Clears named allocations (but does not necessarily free memory)
void DeviceContext::clear() {
allocations_.clear();
}
/// Frees all device memory allocations
void DeviceContext::free() {
allocations_.clear();
device_memory_.clear();
}
/// Gets the allocation by name
DeviceAllocation &DeviceContext::at(std::string const &name) {
return *allocations_.at(name);
}
size_t DeviceContext::size() const {
return allocations_.size();
}
DeviceContext::AllocationMap::iterator DeviceContext::begin() {
return allocations_.begin();
}
DeviceContext::AllocationMap::iterator DeviceContext::end() {
return allocations_.end();
}
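// Illustrative usage (a sketch; `options` is a fully parsed Options object, and the
// extent/stride values below are placeholders describing a 128x128 row-major matrix):
//
//   DeviceContext context;
//   DeviceAllocation *A = context.allocate_tensor(
//     options, "A",
//     library::NumericTypeID::kF16, library::LayoutTypeID::kRowMajor,
//     /* extent = */ {128, 128}, /* stride = */ {128},
//     /* batch_count = */ 1, /* seed_shift = */ 0);
//   // ... use the allocation, then release:
//   context.free();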
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| tools/profiler/src/device_context.cu/0 | {
"file_path": "tools/profiler/src/device_context.cu",
"repo_id": "tools",
"token_count": 2887
} | 68 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief GETT command line parser to gather semantic modes, their stride order, and extents.
*/
#pragma once
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <map>
#include <algorithm>
#include <numeric>
#include "cutlass/util/command_line.h"
namespace cutlass {
// Output shortcuts
std::ostream& operator<<(std::ostream& os, std::vector<char> data) {
for (auto& a : data) os << a;
return os;
}
template <class T>
std::ostream& operator<<(std::ostream& os, std::vector<T> data) {
for (auto& a : data) os << a << " ";
return os;
}
struct GettCommandLine {
struct GettProblem {
using extent_type = int;
using stride_type = int64_t;
// Row modes: appear in A and C/D
std::vector<extent_type> M;
std::vector<stride_type> ldAm;
std::vector<stride_type> ldCm;
// Column modes: appear in B and C/D
std::vector<extent_type> N;
std::vector<stride_type> ldBn;
std::vector<stride_type> ldCn;
// Reduction modes: appear in A and B
std::vector<extent_type> K;
std::vector<stride_type> ldAk;
std::vector<stride_type> ldBk;
// Batch modes: appear in all in/out tensors
std::vector<extent_type> L;
std::vector<stride_type> ldAl;
std::vector<stride_type> ldBl;
std::vector<stride_type> ldCl;
};
static GettProblem
parse(int argc, char const* argv[], bool parse_verbose = false) {
using extent_type = typename GettProblem::extent_type;
using stride_type = typename GettProblem::stride_type;
cutlass::CommandLine cmd(argc, argv);
// modeA
std::vector<char> a_mode;
cmd.get_cmd_line_arguments("modeA", a_mode);
// modeB
std::vector<char> b_mode;
cmd.get_cmd_line_arguments("modeB", b_mode);
// modeC
std::vector<char> c_mode;
cmd.get_cmd_line_arguments("modeC", c_mode);
// mode_sizes
std::map<char,extent_type> mode_size;
// First, initialize all modes in a, b, c to make sure they're in the map
for (char a : a_mode) mode_size[a] = 1;
for (char b : b_mode) mode_size[b] = 1;
for (char c : c_mode) mode_size[c] = 1;
// Then, overwrite the ones in -extent
std::vector<std::pair<std::string, std::string> > extent_tokens;
cmd.get_cmd_line_argument_pairs("extents", extent_tokens);
for (auto e : extent_tokens) {
if (std::get<0>(e).size() > 1) {
std::cerr << "ERROR: Mode name must only be 1 character long.\n";
print_usage();
exit(1);
}
char label = std::get<0>(e)[0];
int size = std::stoi(std::get<1>(e));
mode_size[label] = size;
}
// Print out symbolic modes and their extents
if (parse_verbose) {
std::cout << "C_" << c_mode << " = A_" << a_mode << " * B_" << b_mode << "\n";
for (auto e : mode_size) std::cout << " " << std::get<0>(e) << " : " << std::get<1>(e) << "\n";
}
//
// Collect/Compute strides
//
std::map<char,stride_type> mode_ldA;
std::map<char,stride_type> mode_ldB;
std::map<char,stride_type> mode_ldC;
{
stride_type current;
current = 1;
for (char a : a_mode) { mode_ldA[a] = current; current *= mode_size[a]; }
current = 1;
for (char b : b_mode) { mode_ldB[b] = current; current *= mode_size[b]; }
current = 1;
for (char c : c_mode) { mode_ldC[c] = current; current *= mode_size[c]; }
}
//
// Collect mode categories
//
std::vector<char> row_mode; // rows
std::vector<char> col_mode; // columns
std::vector<char> red_mode; // reductions
std::vector<char> bat_mode; // batches
{
std::vector<char> a_label = a_mode;
std::vector<char> b_label = b_mode;
std::vector<char> c_label = c_mode;
std::sort(std::begin(a_label), std::end(a_label));
std::sort(std::begin(b_label), std::end(b_label));
std::sort(std::begin(c_label), std::end(c_label));
// std::set_intersection to find the semantic category of each symbolic mode
std::set_intersection(std::begin(a_label), std::end(a_label),
std::begin(c_label), std::end(c_label),
std::back_inserter(row_mode));
std::set_intersection(std::begin(b_label), std::end(b_label),
std::begin(c_label), std::end(c_label),
std::back_inserter(col_mode));
std::set_intersection(std::begin(a_label), std::end(a_label),
std::begin(b_label), std::end(b_label),
std::back_inserter(red_mode));
std::set_intersection(std::begin(row_mode), std::end(row_mode),
std::begin(col_mode), std::end(col_mode),
std::back_inserter(bat_mode));
// std::set_difference to remove batch modes from other semantic modes
for (char l : bat_mode) {
row_mode.erase(std::remove(std::begin(row_mode), std::end(row_mode), l), std::end(row_mode));
col_mode.erase(std::remove(std::begin(col_mode), std::end(col_mode), l), std::end(col_mode));
red_mode.erase(std::remove(std::begin(red_mode), std::end(red_mode), l), std::end(red_mode));
}
}
// Print out the semantic association of each symbolic mode
if (parse_verbose) {
std::cout << " rows : " << row_mode << '\n';
std::cout << " cols : " << col_mode << '\n';
std::cout << " reds : " << red_mode << '\n';
std::cout << " bats : " << bat_mode << '\n';
}
//
// Permute modes
//
// Permute the batched modes to promote coalescing
// Sort the batched modes by min(ldAl,ldBl) and in case of a tie by the size
std::sort(std::begin(bat_mode), std::end(bat_mode), [&](char l1, char l2) {
return std::tie(std::min(mode_ldA[l1],mode_ldB[l1]),mode_size[l1])
< std::tie(std::min(mode_ldA[l2],mode_ldB[l2]),mode_size[l2]);
});
// Compute sizes and strides of ordered batch modes
std::vector<extent_type> L;
std::vector<stride_type> ldAl;
std::vector<stride_type> ldBl;
std::vector<stride_type> ldCl;
for (char l : bat_mode) {
L.push_back(mode_size[l]);
ldAl.push_back(mode_ldA[l]);
ldBl.push_back(mode_ldB[l]);
ldCl.push_back(mode_ldC[l]);
}
// Permute the reduction modes to promote coalescing
// Sort the reduction modes by min(ldAk,ldBk) and in case of a tie by the size
std::sort(std::begin(red_mode), std::end(red_mode), [&](char k1, char k2) {
return std::tie(std::min(mode_ldA[k1],mode_ldB[k1]),mode_size[k1])
< std::tie(std::min(mode_ldA[k2],mode_ldB[k2]),mode_size[k2]);
});
// Compute sizes and strides of ordered reduction modes
std::vector<extent_type> K;
std::vector<stride_type> ldAk;
std::vector<stride_type> ldBk;
for (char k : red_mode) {
K.push_back(mode_size[k]);
ldAk.push_back(mode_ldA[k]);
ldBk.push_back(mode_ldB[k]);
}
// Permute the row modes to promote coalescing
// Sort the row modes by min(ldAm,ldCm) and in case of a tie by ldAm
std::sort(std::begin(row_mode), std::end(row_mode), [&](char m1, char m2) {
return std::tie(std::min(mode_ldA[m1],mode_ldC[m1]),mode_ldA[m1])
< std::tie(std::min(mode_ldA[m2],mode_ldC[m2]),mode_ldA[m2]);
});
// Compute sizes and strides of ordered row modes
std::vector<extent_type> M;
std::vector<stride_type> ldAm;
std::vector<stride_type> ldCm;
for (char m : row_mode) {
M.push_back(mode_size[m]);
ldAm.push_back(mode_ldA[m]);
ldCm.push_back(mode_ldC[m]);
}
// Permute the col modes to promote coalescing
// Sort the col modes by min(ldBn,ldCn) and in case of a tie by ldBn
std::sort(std::begin(col_mode), std::end(col_mode), [&](char n1, char n2) {
return std::tie(std::min(mode_ldB[n1],mode_ldC[n1]),mode_ldB[n1])
< std::tie(std::min(mode_ldB[n2],mode_ldC[n2]),mode_ldB[n2]);
});
// Compute sizes and strides of ordered col modes
std::vector<extent_type> N;
std::vector<stride_type> ldBn;
std::vector<stride_type> ldCn;
for (char n : col_mode) {
N.push_back(mode_size[n]);
ldBn.push_back(mode_ldB[n]);
ldCn.push_back(mode_ldC[n]);
}
if (parse_verbose) {
std::cout << "C_";
if (! row_mode.empty()) {
std::cout << "(" << row_mode << ")";
}
if (! col_mode.empty()) {
std::cout << "(" << col_mode << ")";
}
if (! bat_mode.empty()) {
std::cout << "(" << bat_mode << ")";
}
std::cout << " = A_";
if (! row_mode.empty()) {
std::cout << "(" << row_mode << ")";
}
if (! red_mode.empty()) {
std::cout << "(" << red_mode << ")";
}
if (! bat_mode.empty()) {
std::cout << "(" << bat_mode << ")";
}
std::cout << " * B_";
if (! col_mode.empty()) {
std::cout << "(" << col_mode << ")";
}
if (! red_mode.empty()) {
std::cout << "(" << red_mode << ")";
}
if (! bat_mode.empty()) {
std::cout << "(" << bat_mode << ")";
}
std::cout << '\n';
int M_size = std::accumulate(std::begin(M), std::end(M), 1, std::multiplies<>{});
int N_size = std::accumulate(std::begin(N), std::end(N), 1, std::multiplies<>{});
int K_size = std::accumulate(std::begin(K), std::end(K), 1, std::multiplies<>{});
int L_size = std::accumulate(std::begin(L), std::end(L), 1, std::multiplies<>{});
std::cout << " M : (" << M_size << ") ";
for (char m : row_mode) std::cout << m << ":" << mode_size[m] << " ";
std::cout << '\n';
std::cout << " N : (" << N_size << ") ";
for (char n : col_mode) std::cout << n << ":" << mode_size[n] << " ";
std::cout << '\n';
std::cout << " K : (" << K_size << ") ";
for (char k : red_mode) std::cout << k << ":" << mode_size[k] << " ";
std::cout << '\n';
std::cout << " L : (" << L_size << ") ";
for (char l : bat_mode) std::cout << l << ":" << mode_size[l] << " ";
std::cout << '\n';
std::cout << " ldAm : " << ldAm << '\n';
std::cout << " ldAk : " << ldAk << '\n';
std::cout << " ldAl : " << ldAl << '\n';
std::cout << " ldBn : " << ldBn << '\n';
std::cout << " ldBk : " << ldBk << '\n';
std::cout << " ldBl : " << ldBl << '\n';
std::cout << " ldCm : " << ldCm << '\n';
std::cout << " ldCn : " << ldCn << '\n';
std::cout << " ldCl : " << ldCl << '\n';
}
return {M, ldAm, ldCm,
N, ldBn, ldCn,
K, ldAk, ldBk,
L, ldAl, ldBl, ldCl};
}
static void
print_usage() {
std::cout <<
"GETT problem command line parser:\n"
"  --modeA=<m0,...>\n"
"    A comma delimited list of characters that correspond to the row, reduction, and batch modes in A tensor.\n"
"    The semantic association of each symbolic mode is determined automatically.\n\n"
"  --modeB=<m0,...>\n"
"    A comma delimited list of characters that correspond to the column, reduction, and batch modes in B tensor.\n"
"    The semantic association of each symbolic mode is determined automatically.\n\n"
"  --modeC=<m0,...>\n"
"    A comma delimited list of characters that correspond to the row, column, and batch modes in C tensor.\n"
"    The semantic association of each symbolic mode is determined automatically.\n\n"
"  --extents=<mode:extent,....>\n"
"    A comma delimited list of symbolic modes and their corresponding extents.\n"
"    Extents are defaulted to 1 if any are not provided.\n\n"
"Example usage: gett.exe --modeC=m,n,l --modeA=m,k,l --modeB=k,n,l --extents=m:4096,n:4096,k:4096\n";
}
};
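// Illustrative host-side usage (a sketch; the command line mirrors the example printed by
// print_usage above, and `argc`/`argv` are the usual main() parameters):
//
//   // ./gett --modeC=m,n,l --modeA=m,k,l --modeB=k,n,l --extents=m:4096,n:4096,k:4096
//   GettCommandLine::GettProblem problem =
//     GettCommandLine::parse(argc, argv, /* parse_verbose = */ true);
//   // problem.M/N/K/L hold the ordered extents; problem.ldAm, problem.ldBn, problem.ldAk,
//   // problem.ldAl, ... hold the matching strides.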
} // namespace cutlass
| tools/util/include/cutlass/util/gett_commandline.hpp/0 | {
"file_path": "tools/util/include/cutlass/util/gett_commandline.hpp",
"repo_id": "tools",
"token_count": 5990
} | 69 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for GEMM in host-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/reference/device/thread/gemm.h"
namespace cutlass {
namespace reference {
namespace device {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename TensorRefA,
typename TensorRefB,
typename TensorRefC,
typename ScalarType,
typename AccumulatorType,
typename OutputTile,
typename InnerProductOp,
typename ConvertOp
>
__global__ void Gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRefA tensor_a,
TensorRefB tensor_b,
ScalarType beta,
TensorRefC tensor_c,
TensorRefC tensor_d,
AccumulatorType initial_accum) {
// Map each thread to a unique tile of the output matrix
MatrixCoord output_coord(
MatrixCoord::Index((threadIdx.x + blockIdx.x * blockDim.x) * OutputTile::kRow),
MatrixCoord::Index((threadIdx.y + blockIdx.y * blockDim.y) * OutputTile::kColumn)
);
// Compute the general matrix product
thread::Gemm<
TensorRefA,
TensorRefB,
TensorRefC,
ScalarType,
AccumulatorType,
OutputTile,
InnerProductOp,
ConvertOp
> gemm(initial_accum);
gemm.multiply_add(
problem_size,
tensor_a,
tensor_b,
output_coord);
gemm.epilogue(problem_size, alpha, beta, tensor_c, tensor_d, output_coord);
}
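// Illustrative launch sketch (an assumption-laden example, not the library's host-side
// wrapper; the block shape is arbitrary and each thread covers one OutputTile of D):
//
//   dim3 block(16, 16);
//   dim3 grid(
//     (problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
//     (problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn));
//   kernel::Gemm<TensorRefA, TensorRefB, TensorRefC, ScalarType, AccumulatorType,
//                OutputTile, InnerProductOp, ConvertOp><<<grid, block>>>(
//     problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);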
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType,
typename OutputTile,
typename InnerProductOp,
typename ConvertOp
>
__global__ void BatchedGemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRefCollectionA tensor_collection_a,
TensorRefCollectionB tensor_collection_b,
ScalarType beta,
TensorRefCollectionC tensor_collection_c,
AccumulatorType initial_accum) {
// Obtain batch ID
int batch_id = blockIdx.z;
// Dereference based on batch_id
typename TensorRefCollectionA::TensorRef tensor_a = tensor_collection_a.at(batch_id);
typename TensorRefCollectionB::TensorRef tensor_b = tensor_collection_b.at(batch_id);
typename TensorRefCollectionC::TensorRef tensor_c = tensor_collection_c.at(batch_id);
// Map each thread to a unique tile of the output matrix
MatrixCoord output_coord(
(threadIdx.x + blockIdx.x * blockDim.x) * OutputTile::kColumn,
(threadIdx.y + blockIdx.y * blockDim.y) * OutputTile::kRow
);
// Compute the general matrix product
thread::Gemm<
typename TensorRefCollectionA::TensorRef,
typename TensorRefCollectionB::TensorRef,
typename TensorRefCollectionC::TensorRef,
ScalarType,
AccumulatorType,
OutputTile,
InnerProductOp,
ConvertOp
> gemm(initial_accum);
gemm.multiply_add(
problem_size,
tensor_a,
tensor_b,
output_coord);
gemm.epilogue(problem_size, alpha, beta, tensor_c, output_coord);
}
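// Illustrative note (sketch): BatchedGemm expects a three-dimensional grid in which
// blockIdx.z selects the batch index, e.g.
//
//   dim3 block(16, 16);
//   dim3 grid(grid_m, grid_n, batch_count);   // grid_m / grid_n sized as for Gemm above
//   kernel::BatchedGemm<TensorRefCollectionA, TensorRefCollectionB, TensorRefCollectionC,
//                       ScalarType, AccumulatorType, OutputTile, InnerProductOp, ConvertOp>
//     <<<grid, block>>>(problem_size, alpha, collection_a, collection_b, beta,
//                       collection_c, initial_accum);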
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace device
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/device/kernel/gemm.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/kernel/gemm.h",
"repo_id": "tools",
"token_count": 1661
} | 70 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for GETT in host-side code.
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/gemm/gemm.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/relatively_equal.h"
#include "cute/tensor.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::reference::host {
template<class T, class = void>
struct ElementTraits {
using type = T;
};
template<class T>
struct ElementTraits<T, std::enable_if_t<!std::is_same_v<decltype(std::declval<T>().get()), void> > > {
using type = decltype(std::declval<T>().get());
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template<
class ElementAccumulator_,
class TensorA_, // (M, K, L)
class TensorB_ // (N, K, L)
>
struct GettMainloopParams {
using ElementAccumulator = ElementAccumulator_;
using TensorA = TensorA_;
using TensorB = TensorB_;
using EngineA = typename TensorA::engine_type;
using LayoutA = typename TensorA::layout_type;
using EngineB = typename TensorB::engine_type;
using LayoutB = typename TensorB::layout_type;
TensorA A{};
TensorB B{};
ComplexTransform transform_A = ComplexTransform::kNone;
ComplexTransform transform_B = ComplexTransform::kNone;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template<
class ElementScalar_,
class ElementScalingFactor_,
class ElementAccumulator_,
class ElementCompute_,
class TensorC_, // (M, N, L)
class TensorD_, // (M, N, L)
class VectorBias_ = TensorD_, // (M, 1)
class TensorAux_ = TensorD_, // (M, N, L)
class VectorAlpha_ = TensorD_, // (M, 1)
class VectorBeta_ = VectorAlpha_, // (M, 1)
class ActivationFunctor_ = cutlass::epilogue::thread::Identity<ElementCompute_>,
class BiasBinaryOp_ = cutlass::plus<ElementCompute_>,
bool PerColumnBias_ = false
>
struct GettEpilogueParams {
using ElementScalar = ElementScalar_;
using ElementScalingFactor = ElementScalingFactor_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using TensorC = TensorC_;
using TensorD = TensorD_;
using TensorAux = TensorAux_;
using VectorBias = VectorBias_;
using VectorAlpha = VectorAlpha_;
using VectorBeta = VectorBeta_;
using ActivationFunctor = ActivationFunctor_;
using BiasBinaryOp = BiasBinaryOp_;
using EngineC = typename TensorC::engine_type;
using LayoutC = typename TensorC::layout_type;
using EngineD = typename TensorD::engine_type;
using LayoutD = typename TensorD::layout_type;
static constexpr bool PerColumnBias = PerColumnBias_;
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
TensorC C{};
TensorD D{};
VectorBias Bias{};
TensorAux Aux{};
VectorAlpha Valpha{};
VectorBeta Vbeta{};
ElementCompute st = ElementCompute(1);
ElementAccumulator* abs_max_D = nullptr;
ElementAccumulator* abs_max_Aux = nullptr;
ElementScalingFactor scale_a = ElementScalingFactor(1);
ElementScalingFactor scale_b = ElementScalingFactor(1);
ElementScalingFactor scale_c = ElementScalingFactor(1);
ElementScalingFactor scale_d = ElementScalingFactor(1);
ElementScalingFactor scale_aux = ElementScalingFactor(1);
bool beta_per_channel_scaling = false;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GETT - General Tensor-Tensor contraction reference kernel
template <
class MainloopParams,
class EpilogueParams
>
void Gett(
MainloopParams const& mainloop_params,
EpilogueParams const& epilogue_params)
{
static int constexpr kBlockM = 64;
static int constexpr kBlockN = 64;
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int64_t l = 0; l < cute::size<2>(mainloop_params.A.layout()); ++l) {
for (int64_t m = 0; m < cute::size<0>(mainloop_params.A.layout()); m += kBlockM) {
for (int64_t n = 0; n < cute::size<0>(mainloop_params.B.layout()); n += kBlockN) {
typename MainloopParams::ElementAccumulator acc[kBlockM][kBlockN];
gett_mainloop(mainloop_params, m, n, l, acc);
gett_epilogue(epilogue_params, m, n, l, acc);
}
}
}
}
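/////////////////////////////////////////////////////////////////////////////////////////////////
/// Illustrative usage sketch (not part of the reference API): wraps raw host buffers in rank-3
/// cute tensors and invokes Gett(). The pointer and extent names below are hypothetical
/// placeholders, and the layouts default to compact column-major within each mode.
template <class Element>
void gett_reference_example(
    Element* ptr_A, Element* ptr_B, Element* ptr_C, Element* ptr_D,
    int M, int N, int K, int L) {
  auto A = cute::make_tensor(ptr_A, cute::make_layout(cute::make_shape(M, K, L)));  // (M, K, L)
  auto B = cute::make_tensor(ptr_B, cute::make_layout(cute::make_shape(N, K, L)));  // (N, K, L)
  auto C = cute::make_tensor(ptr_C, cute::make_layout(cute::make_shape(M, N, L)));  // (M, N, L)
  auto D = cute::make_tensor(ptr_D, cute::make_layout(cute::make_shape(M, N, L)));  // (M, N, L)
  // Mainloop: accumulate at Element precision over A (M, K, L) and B (N, K, L)
  GettMainloopParams<Element, decltype(A), decltype(B)> mainloop_params{A, B};
  // Epilogue: plain D = alpha * acc + beta * C with the default identity activation
  GettEpilogueParams<Element, Element, Element, Element, decltype(C), decltype(D)> epilogue_params;
  epilogue_params.C = C;
  epilogue_params.D = D;
  epilogue_params.alpha = Element(1);
  epilogue_params.beta = Element(0);
  Gett(mainloop_params, epilogue_params);
}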
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GETT - Mainloop
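///
/// In outline, for each (m_b, n_b) in the current block this reference accumulates
///   acc[m_b][n_b] = sum_k conj?(A(m + m_b, k, l)) * conj?(B(n + n_b, k, l))
/// at ElementAccumulator precision, with out-of-bounds rows and columns contributing zero.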
template <class MainloopParams, class ElementAccumulator, int kBlockM, int kBlockN>
void gett_mainloop(
MainloopParams const& mainloop_params,
int64_t m,
int64_t n,
int64_t l,
ElementAccumulator (&acc)[kBlockM][kBlockN])
{
static_assert(cute::rank(typename MainloopParams::LayoutA{}) == 3, "M, K, B");
static_assert(cute::rank(typename MainloopParams::LayoutB{}) == 3, "N, K, B");
using ElementA = typename ElementTraits<typename MainloopParams::EngineA::value_type>::type;
using ElementB = typename ElementTraits<typename MainloopParams::EngineB::value_type>::type;
using RingOp = multiply_add<ElementAccumulator, ElementAccumulator, ElementAccumulator>;
RingOp fma_op;
// Zero out accumulators
for (int m_b = 0; m_b < kBlockM; ++m_b) {
for (int n_b = 0; n_b < kBlockN; ++n_b) {
acc[m_b][n_b] = ElementAccumulator(0); // RingOp::AdditionIdentity
}
}
// Compute on this k-block
for (int64_t k = 0; k < cute::size<1>(mainloop_params.A.layout()); ++k) {
// Load A
ElementAccumulator a_frag[kBlockM];
for (int m_b = 0; m_b < kBlockM; ++m_b) {
if (m + m_b < cute::size<0>(mainloop_params.A.layout())) {
// Perform reference GEMM calculations at the accumulator's precision. Cast A value to accumulator type.
a_frag[m_b] = static_cast<ElementAccumulator>(ElementA(mainloop_params.A(m + m_b, k, l)));
if (mainloop_params.transform_A == ComplexTransform::kConjugate) {
a_frag[m_b] = conj(a_frag[m_b]);
}
} else {
a_frag[m_b] = ElementAccumulator(0); // RingOp::AdditionIdentity
}
}
// Load B
ElementAccumulator b_frag[kBlockN];
for (int n_b = 0; n_b < kBlockN; ++n_b) {
if (n + n_b < cute::size<0>(mainloop_params.B.layout())) {
// Perform reference GEMM calculations at the accumulator's precision. Cast B value to accumulator type.
b_frag[n_b] = static_cast<ElementAccumulator>(ElementB(mainloop_params.B(n + n_b, k, l)));
if (mainloop_params.transform_B == ComplexTransform::kConjugate) {
b_frag[n_b] = conj(b_frag[n_b]);
}
} else {
b_frag[n_b] = ElementAccumulator(0); // RingOp::AdditionIdentity
}
}
// do compute
for (int m_b = 0; m_b < kBlockM; ++m_b) {
for (int n_b = 0; n_b < kBlockN; ++n_b) {
acc[m_b][n_b] = fma_op(a_frag[m_b], b_frag[n_b], acc[m_b][n_b]);
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GETT - Epilogue
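///
/// In outline, for each in-bounds output element this reference computes
///   D = activation( alpha * scale_a * scale_b * acc  [ bias_op bias ]  [ + beta * scale_c * C ] )
/// with optional per-row Valpha/Vbeta overrides, and, for FP8 outputs, an absolute-maximum
/// reduction followed by a final multiply by scale_d (scale_aux for the auxiliary tensor).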
template <class EpilogueParams, class ElementAccumulator, int kBlockM, int kBlockN>
void gett_epilogue(
EpilogueParams const& epilogue_params,
int64_t m,
int64_t n,
int64_t l,
ElementAccumulator (&acc)[kBlockM][kBlockN])
{
static_assert(cute::rank(typename EpilogueParams::LayoutC{}) == 3, "M, N, B");
static_assert(cute::rank(typename EpilogueParams::LayoutD{}) == 3, "M, N, B");
using ElementCompute = typename EpilogueParams::ElementCompute;
using ElementC = typename EpilogueParams::TensorC::value_type;
using ElementD = typename EpilogueParams::TensorD::value_type;
using ElementAux = typename EpilogueParams::TensorAux::value_type;
using ElementBias = typename EpilogueParams::VectorBias::value_type;
using ElementScalar = typename EpilogueParams::ElementScalar;
using ElementScalingFactor = typename EpilogueParams::ElementScalingFactor;
using ActivationFunctor = typename EpilogueParams::ActivationFunctor;
using BiasBinaryOp = typename EpilogueParams::BiasBinaryOp;
constexpr bool PerColBias = EpilogueParams::PerColumnBias;
constexpr bool IsScalingAndAmaxOutputNeeded =
cute::is_same_v<ElementD, cutlass::float_e4m3_t> or
cute::is_same_v<ElementD, cutlass::float_e5m2_t>;
constexpr bool IsScalingAndAmaxAuxOutputNeeded =
cute::is_same_v<ElementAux, cutlass::float_e4m3_t> or
cute::is_same_v<ElementAux, cutlass::float_e5m2_t>;
constexpr bool IsReLUAuxNeeded =
(cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::ReLu<ElementCompute>> or
cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::Clamp<ElementCompute>>) and
cute::is_same_v<ElementAux, cutlass::uint1b_t>;
constexpr bool IsClamp =
cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::Clamp<ElementCompute>>;
constexpr bool IsBackpropFusion =
cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::dGELU<ElementCompute>> or
cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::dReLU<ElementCompute>>;
// Input related converter
NumericConverter<ElementCompute, ElementAccumulator> accumulator_converter;
NumericConverter<ElementCompute, ElementC> source_converter;
NumericConverter<ElementCompute, ElementBias> bias_converter;
[[maybe_unused]] NumericConverter<ElementCompute, ElementAux> aux_source_converter;
// Scale related converter
NumericConverter<ElementCompute, ElementScalar> scale_converter;
NumericConverter<ElementCompute, ElementScalingFactor> scaling_factor_converter;
// Abs max converter
[[maybe_unused]] NumericConverter<ElementAccumulator, ElementCompute> abs_max_output_converter;
// Output related converter
NumericConverter<ElementD, ElementCompute> destination_converter;
NumericConverter<ElementAux, ElementCompute> aux_destination_converter;
NumericConverter<ElementBias, ElementCompute> dBias_converter;
// Epilogue operations
multiply_add<ElementCompute, ElementCompute, ElementCompute> epilogue_fma;
multiplies<ElementCompute> mul;
plus<ElementCompute> add;
// Activation operation
ActivationFunctor activation;
// Bias binary operation
BiasBinaryOp bias_op;
// Do conversion
ElementCompute converted_alpha = scale_converter(epilogue_params.alpha);
ElementCompute converted_beta = scale_converter(epilogue_params.beta);
ElementCompute converted_scale_a = scaling_factor_converter(epilogue_params.scale_a);
ElementCompute converted_scale_b = scaling_factor_converter(epilogue_params.scale_b);
ElementCompute converted_scale_c = scaling_factor_converter(epilogue_params.scale_c);
ElementCompute converted_scale_d = scaling_factor_converter(epilogue_params.scale_d);
ElementCompute converted_scale_aux = scaling_factor_converter(epilogue_params.scale_aux);
// Init local var
[[maybe_unused]] ElementCompute local_abs_max_output = ElementCompute(0);
[[maybe_unused]] ElementCompute local_abs_max_aux_output = ElementCompute(0);
converted_alpha = mul(converted_alpha, mul(converted_scale_a, converted_scale_b));
converted_beta = mul(converted_beta, converted_scale_c);
ElementCompute inter_accum[kBlockM][kBlockN];
for (int m_b = 0; m_b < kBlockM; ++m_b) {
ElementCompute local_dBias = ElementCompute(0);
for (int n_b = 0; n_b < kBlockN; ++n_b) {
if (m + m_b < cute::size<0>(epilogue_params.D.layout()) && n + n_b < cute::size<1>(epilogue_params.D.layout())) {
// Convert every type to ElementCompute first, do compute, convert to output type, write it out
ElementCompute converted_acc = accumulator_converter(acc[m_b][n_b]);
// per-row alpha
if (raw_pointer_cast(epilogue_params.Valpha.data())) {
converted_alpha = scale_converter(epilogue_params.Valpha(m + m_b));
}
ElementCompute output = mul(converted_alpha, converted_acc);
if (raw_pointer_cast(epilogue_params.Bias.data()) && not IsBackpropFusion) {
ElementCompute converted_bias = bias_converter(epilogue_params.Bias(PerColBias ? n + n_b : m + m_b));
output = bias_op(output, converted_bias);
}
if (raw_pointer_cast(epilogue_params.C.data())) {
ElementCompute converted_src = source_converter(epilogue_params.C(m + m_b, n + n_b, l));
// per-row beta
if (epilogue_params.Vbeta.data()) {
converted_beta = scale_converter(epilogue_params.Vbeta(m + m_b));
}
output = epilogue_fma(converted_beta, converted_src, output);
}
if constexpr (IsBackpropFusion) {
ElementAux aux_input = ElementAux(0);
if (raw_pointer_cast(epilogue_params.Aux.data())) {
aux_input = epilogue_params.Aux(m + m_b, n + n_b, l);
}
output = activation(output, aux_source_converter(aux_input));
local_dBias = add(local_dBias, output);
}
else {
if (raw_pointer_cast(epilogue_params.Aux.data())) {
auto aux_output = output;
if constexpr (IsScalingAndAmaxAuxOutputNeeded) {
maximum_absolute_value_reduction<ElementCompute, true> amax_op;
local_abs_max_aux_output = amax_op(local_abs_max_aux_output, aux_output);
aux_output = epilogue_fma(converted_scale_aux, aux_output, ElementCompute(0));
}
if constexpr (IsReLUAuxNeeded) {
epilogue_params.Aux(m + m_b, n + n_b, l) = not (aux_output < 0) ? uint1b_t(1) : uint1b_t(0);
} else {
epilogue_params.Aux(m + m_b, n + n_b, l) = aux_destination_converter(aux_output);
}
}
if constexpr (IsClamp) { // Treat Clamp as ReLU
output = activation(output, {0, std::numeric_limits<ElementCompute>::max()});
}
else {
output = activation(output);
}
}
if constexpr (IsScalingAndAmaxOutputNeeded) {
maximum_absolute_value_reduction<ElementCompute, true> amax_op;
local_abs_max_output = amax_op(local_abs_max_output, output);
output = epilogue_fma(converted_scale_d, output, ElementCompute(0));
}
inter_accum[m_b][n_b] = ElementCompute(output);
}
} // n_b
if (m + m_b < cute::size<0>(epilogue_params.D.layout()) && n < cute::size<1>(epilogue_params.D.layout())) {
if (raw_pointer_cast(epilogue_params.Bias.data()) && IsBackpropFusion) {
ElementCompute converted_dBias = bias_converter(epilogue_params.Bias(m + m_b));
local_dBias = add(local_dBias, converted_dBias);
epilogue_params.Bias(m + m_b) = dBias_converter(local_dBias);
}
}
} // m_b
for (int m_b = 0; m_b < kBlockM; ++m_b) {
for (int n_b = 0; n_b < kBlockN; ++n_b) {
if (m + m_b < cute::size<0>(epilogue_params.D.layout()) && n + n_b < cute::size<1>(epilogue_params.D.layout())) {
epilogue_params.D(m + m_b, n + n_b, l) = destination_converter(inter_accum[m_b][n_b]);
}
}
}
#if defined(_OPENMP)
#pragma omp critical(Abs_Max_Data_Update)
#endif
{
if constexpr (IsScalingAndAmaxOutputNeeded) {
if (epilogue_params.abs_max_D) {
*epilogue_params.abs_max_D = maximum_with_nan_propogation<ElementAccumulator>{}(
*epilogue_params.abs_max_D, abs_max_output_converter(local_abs_max_output));
}
}
if constexpr (IsScalingAndAmaxAuxOutputNeeded) {
if (epilogue_params.abs_max_Aux) {
*epilogue_params.abs_max_Aux = maximum_with_nan_propogation<ElementAccumulator>{}(
*epilogue_params.abs_max_Aux, abs_max_output_converter(local_abs_max_aux_output));
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <class TensorType>
auto make_layout_rank3(const TensorType& tensor) {
// append a batch mode of size 1 if we do not have tensors that are rank 3
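// For example (hypothetical extents), a rank-2 column-major layout (M, N):(1, M) becomes
// (M, N, 1):(1, M, M*N), i.e. a single batch whose stride spans the whole matrix.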
return make_layout(
make_shape(cute::get<0>(tensor.shape()), cute::get<1>(tensor.shape()), cute::Int<1>{}),
make_stride(cute::get<0>(tensor.stride()), cute::get<1>(tensor.stride()), int64_t(cosize(tensor.layout()))));
}
/// GEMM - General Matrix-Matrix contraction without conjugation options
template <
class MainloopParams,
class EpilogueParams
>
void Gemm3x(
MainloopParams const& mainloop_params,
EpilogueParams const& epilogue_params)
{
using namespace cute;
static_assert(cute::rank(typename MainloopParams::LayoutA{}) == cute::rank(typename MainloopParams::LayoutB{}));
static_assert(cute::rank(typename EpilogueParams::LayoutC{}) == cute::rank(typename EpilogueParams::LayoutD{}));
static_assert(cute::rank(typename MainloopParams::LayoutA{}) == cute::rank(typename EpilogueParams::LayoutC{}));
if constexpr (cute::rank(typename MainloopParams::LayoutA{}) == 2) {
cute::Layout layout_A = make_layout_rank3(mainloop_params.A);
cute::Layout layout_B = make_layout_rank3(mainloop_params.B);
cute::Layout layout_C = make_layout_rank3(epilogue_params.C);
cute::Layout layout_D = make_layout_rank3(epilogue_params.D);
cute::Layout layout_Aux = make_layout_rank3(epilogue_params.Aux);
cute::Layout layout_Bias = make_layout_rank3(epilogue_params.Bias);
cute::Layout layout_Valpha = make_layout_rank3(epilogue_params.Valpha);
cute::Layout layout_Vbeta = make_layout_rank3(epilogue_params.Vbeta);
auto TensorA = make_tensor(mainloop_params.A.data(), layout_A);
auto TensorB = make_tensor(mainloop_params.B.data(), layout_B);
auto TensorC = make_tensor(epilogue_params.C.data(), layout_C);
auto TensorD = make_tensor(epilogue_params.D.data(), layout_D);
auto TensorAux = make_tensor(epilogue_params.Aux.data(), layout_Aux);
auto VectorBias = make_tensor(epilogue_params.Bias.data(), layout_Bias);
auto VectorAlpha = make_tensor(epilogue_params.Valpha.data(), layout_Valpha);
auto VectorBeta = make_tensor(epilogue_params.Vbeta.data(), layout_Vbeta);
// Reconstruct mainloop params
GettMainloopParams<typename MainloopParams::ElementAccumulator,
decltype(TensorA),
decltype(TensorB)>
mainloop_params_converted{TensorA,
TensorB,
mainloop_params.transform_A,
mainloop_params.transform_B};
// Reconstruct epilogue params
GettEpilogueParams<typename EpilogueParams::ElementScalar,
typename EpilogueParams::ElementScalingFactor,
typename EpilogueParams::ElementAccumulator,
typename EpilogueParams::ElementCompute,
decltype(TensorC),
decltype(TensorD),
decltype(VectorBias),
decltype(TensorAux),
decltype(VectorAlpha),
decltype(VectorBeta)
>
epilogue_params_converted{epilogue_params.alpha,
epilogue_params.beta,
TensorC,
TensorD,
VectorBias,
TensorAux,
VectorAlpha,
VectorBeta,
epilogue_params.abs_amax_D,
epilogue_params.abs_amax_Aux,
epilogue_params.scale_a,
epilogue_params.scale_b,
epilogue_params.scale_c,
epilogue_params.scale_d,
epilogue_params.scale_aux
};
Gett(mainloop_params_converted, epilogue_params_converted);
}
else {
// if we already have a batch mode, just pass it through
Gett(mainloop_params, epilogue_params);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // cutlass::reference::host
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/gett.hpp/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/gett.hpp",
"repo_id": "tools",
"token_count": 9541
} | 71 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for TRMM in host-side code.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/mma.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
namespace cutlass {
namespace reference {
namespace host {
/// Computes a Triangular Matrix Multiplication (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
DiagType DiagTypeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_trmm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
static_assert(SideModeA != SideMode::kInvalid
, "Side Mode can either be Left or Right.");
static_assert(FillModeA == FillMode::kLower || FillModeA == FillMode::kUpper
, "Fill Mode can either be Lower or Upper.");
using CompareOp = typename TrMatrixCompareOp<FillModeA, DiagTypeA>::Type;
// Note: batch is ignored.
int const M = problem_size.m();
int const N = problem_size.n();
// Assuming correct k-dimension value is passed
int const K = problem_size.k();
// Blocking necessary to speedup reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
CompareOp compare_op;
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ElementA a = ElementA();
ElementB b = ElementB();
if (SideModeA == SideMode::kLeft) {
a = (compare_op(row, k_block)) ?
(tensor_a.at(MatrixCoord(row, k_block))) : ElementA(0);
if (row == k_block && DiagTypeA == DiagType::kUnit) {
a = ElementA(1);
}
b = tensor_b.at(MatrixCoord(k_block, col));
} else if (SideModeA == SideMode::kRight) {
a = tensor_b.at(MatrixCoord(row, k_block));
b = (compare_op(k_block, col)) ?
tensor_a.at(MatrixCoord(k_block, col)) : ElementA(0);
if (k_block == col && DiagTypeA == DiagType::kUnit) {
b = ElementA(1);
}
}
ComputeType compute_a(cast_if_scalar<ComputeType>(a));
ComputeType compute_b(cast_if_scalar<ComputeType>(b));
accum[i][j] = inner_product_op(compute_a, compute_b, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
DiagType DiagTypeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Trmm;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, SideMode SideModeA,
FillMode FillModeA, DiagType DiagTypeA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Trmm<ElementA, LayoutA, SideModeA, FillModeA, DiagTypeA, ElementB, LayoutB,
ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_trmm<ElementA, LayoutA, SideModeA, FillModeA, DiagTypeA, ElementB, LayoutB,
ElementC, LayoutC, ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, tensor_d, initial_accum);
}
};
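////////////////////////////////////////////////////////////////////////////////////////////////////
/// Illustrative usage sketch (not part of the library API): computes D = alpha * A * B for a
/// left-side, lower-triangular, non-unit-diagonal A. The extents and tensor refs below are
/// hypothetical placeholders; for a left-side TRMM the GEMM K extent equals M since A is M-by-M.
template <typename Element, typename Layout>
void trmm_reference_example(
    int M, int N, Element alpha,
    TensorRef<Element, Layout> ref_A,
    TensorRef<Element, Layout> ref_B,
    TensorRef<Element, Layout> ref_D) {
  Trmm<Element, Layout,
       SideMode::kLeft, FillMode::kLower, DiagType::kNonUnit,
       Element, Layout,
       Element, Layout,
       Element, Element> trmm_op;
  // Problem size is (M, N, K = M) for a left-side triangular multiply
  trmm_op({M, N, M}, alpha, ref_A, ref_B, ref_D, Element(0));
}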
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/trmm.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/trmm.h",
"repo_id": "tools",
"token_count": 2997
} | 72 |
var searchData=
[
['matrix',['Matrix',['../namespacecutlass_1_1layout.html#af6b33640063b02d26c261efd25053e6c',1,'cutlass::layout']]],
['matrixlayout',['MatrixLayout',['../namespacecutlass.html#af99b012f0e1795ca7dc167b7b109dd19',1,'cutlass']]],
['matrixtransform',['MatrixTransform',['../namespacecutlass.html#ab7e605b25da48d89f98764c12d50b467',1,'cutlass']]]
];
| docs/search/enums_5.js/0 | {
"file_path": "docs/search/enums_5.js",
"repo_id": "docs",
"token_count": 158
} | 0 |
var searchData=
[
['sequential',['Sequential',['../structcutlass_1_1Distribution.html#a499f4023e0d42356ce71d38cc32bf92aa39d3cf55e90573c8d1dfb483cfb410dc',1,'cutlass::Distribution']]]
];
| docs/search/enumvalues_4.js/0 | {
"file_path": "docs/search/enumvalues_4.js",
"repo_id": "docs",
"token_count": 82
} | 1 |
var searchData=
[
['wmma_2eh',['wmma.h',['../wmma_8h.html',1,'']]],
['wmma_5farray_2eh',['wmma_array.h',['../wmma__array_8h.html',1,'']]],
['wmma_5fptx_2eh',['wmma_ptx.h',['../wmma__ptx_8h.html',1,'']]],
['wmma_5fsm70_2eh',['wmma_sm70.h',['../wmma__sm70_8h.html',1,'']]],
['wmma_5fsm72_2eh',['wmma_sm72.h',['../wmma__sm72_8h.html',1,'']]],
['wmma_5fsm75_2eh',['wmma_sm75.h',['../wmma__sm75_8h.html',1,'']]],
['wmma_5ftensor_5fop_5fpolicy_2eh',['wmma_tensor_op_policy.h',['../wmma__tensor__op__policy_8h.html',1,'']]]
];
| docs/search/files_13.js/0 | {
"file_path": "docs/search/files_13.js",
"repo_id": "docs",
"token_count": 290
} | 2 |
var searchData=
[
['gemm_5fbatched_2eh',['gemm_batched.h',['../kernel_2gemm__batched_8h.html',1,'']]],
['gemm_5fsplitk_5fparallel_2eh',['gemm_splitk_parallel.h',['../kernel_2gemm__splitk__parallel_8h.html',1,'']]],
['kernel_5flaunch_2eh',['kernel_launch.h',['../kernel__launch_8h.html',1,'']]]
];
| docs/search/files_9.js/0 | {
"file_path": "docs/search/files_9.js",
"repo_id": "docs",
"token_count": 141
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "threadblock/b2b_mma_base_smem_accumulator.h"
#include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape0_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA0_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA0_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA0,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB0_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB0_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB0,
/// Iterates over vectors of scale and bias vector in global memory
// (concept: VectorIterator)
typename IteratorAccumulatorScaleBias_,
/// Iterates over accumulator tile
typename FragmentIteratorAccumulator_,
/// Iterates over accumulator tile in shared memory
typename SmemIteratorD0_,
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Iterates over the intermediate accumulator tile in shared memory
typename WarpIteratorA1_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB1_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB1_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB1,
/// Data type of accumulator matrix
typename ElementC_,
/// Layout of accumulator matrix
typename LayoutC_,
/// Output operator for 1st Gemm (concept: epilogue::thread::LinearCombinationClamp, etc...)
typename OutputOp_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy1_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class B2bMmaMultistageSmemAccumulator :
public gemm::threadblock::B2bMmaBaseSmemAccumulator<Shape0_, Shape1_, Policy0_, Policy1_, SmemIteratorD0_, Stages> {
public:
///< Base class
using Base = gemm::threadblock::B2bMmaBaseSmemAccumulator<Shape0_, Shape1_, Policy0_, Policy1_, SmemIteratorD0_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape0 = Shape0_;
///< Iterates over tiles of A operand in global memory
using IteratorA0 = IteratorA0_;
using IteratorA = IteratorA0;
///< Iterates over tiles of B operand in global memory
using IteratorB0 = IteratorB0_;
using IteratorB = IteratorB0;
///< Iterates over tiles of the scale and bias vectors in global memory
using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_;
///< Policy describing tuning details
using Policy0 = Policy0_;
using SmemIteratorA0 = SmemIteratorA0_;
using SmemIteratorB0 = SmemIteratorB0_;
using SmemIteratorD0 = SmemIteratorD0_; ///< Iterates over accumulator tile in shared memory
using FragmentIteratorAccumulator = FragmentIteratorAccumulator_; ///< Iterates over accumulator tile
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape1 = Shape1_;
///< Iterates over tiles of B operand in global memory
using IteratorB1 = IteratorB1_;
///< Policy describing tuning details
using Policy1 = Policy1_;
///< Export Policy0 as the threadblock-level Mma's policy
using Policy = Policy0;
using Shape = Shape0;
using SmemIteratorB1 = SmemIteratorB1_;
using WarpIteratorA1 = WarpIteratorA1_; ///< Iterates over the intermediate accumulator tile in shared memory
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Epilogue after 1st Gemm
using OutputOp = OutputOp_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA0 = CacheOpA0;
static cutlass::arch::CacheOperation::Kind const kCacheOpB0 = CacheOpB0;
static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC0 = typename Policy0::Operator::FragmentC;
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
/// Fragment of Scale and Bias loaded from global memory
using FragmentA1ScaleBias = typename IteratorAccumulatorScaleBias::Fragment;
/// Fragment of accumulator tile
using FragmentC1 = typename Policy1::Operator::FragmentC;
/// Warp-level Mma
using Operator1 = typename Policy1::Operator;
/// Epilog in shared memory
using Epilogue0 = epilogue::threadblock::EpilogueSmemAccumulator<
SmemIteratorD0, ///< SmemTileIterator
FragmentIteratorAccumulator, ///< AccumulatorFragmentIterator
IteratorAccumulatorScaleBias, ///< ScaleBiasIterator
OutputOp>; ///< Output operator
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA0 = Operator0::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB0 = Operator0::kTransformB;
/// Complex transform on B operand of the second GEMM
static ComplexTransform const kTransformB1 = Operator1::kTransformB;
/// Complex transform exports needed by higher-level kernels
static ComplexTransform const kTransformA = kTransformA0;
static ComplexTransform const kTransformB = kTransformB0;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations0 > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert(Base::kWarpGemmIterations1 > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const TBLoadIterationsA0 =
IteratorA0::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const TBLoadIterationsB0 =
IteratorB0::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const TBLoadIterationsB1 =
IteratorB1::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load on group of operand A
static int const kAccessesPerGroupA0 =
(TBLoadIterationsA0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0;
/// Number of cp.async instructions to load on group of operand B
static int const kAccessesPerGroupB0 =
(TBLoadIterationsB0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0;
/// Number of cp.async instructions to load on group of operand B
static int const kAccessesPerGroupB1 =
(TBLoadIterationsB1 + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1;
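// For example (hypothetical values): with TBLoadIterationsB1 = 8 and kWarpGemmIterations1 = 4,
// each warp-level MMA iteration issues ceil(8 / 4) = 2 cp.async accesses for operand B1.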
};
private:
using WarpLoadedFragmentA0 = typename Operator0::FragmentA;
using WarpLoadedFragmentB0 = typename Operator0::FragmentB;
using WarpLoadedFragmentA1 = typename Operator1::FragmentA;
using WarpLoadedFragmentB1 = typename Operator1::FragmentB;
using WarpTransformedFragmentA0 = typename Operator0::TransformedFragmentA;
using WarpTransformedFragmentB0 = typename Operator0::TransformedFragmentB;
using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA;
using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA0 smem_iterator_A0_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB0 smem_iterator_B0_;
/// Shared Memory Iterator to store accumulator tile
SmemIteratorD0 smem_iterator_D0_;
/// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile
WarpIteratorA1 warp_tile_iterator_A1_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB1 smem_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
B2bMmaMultistageSmemAccumulator(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::B2bMmaSharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx,
///< GEMM0 N is used for accumulator extent
int problem_size_0_n
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A0_(shared_storage.b2b_mma_shared_storage.shared_storage0.operand_A_ref(), thread_idx),
smem_iterator_B0_(shared_storage.b2b_mma_shared_storage.shared_storage0.operand_B_ref(), thread_idx),
smem_iterator_D0_(shared_storage.accumulator_shared_storage0.accum_ref(), lane_idx),
warp_tile_iterator_A1_(shared_storage.accumulator_shared_storage0.accum_ref(), {Base::WarpGemm1::kM, problem_size_0_n}, lane_idx ),
smem_iterator_B1_(shared_storage.b2b_mma_shared_storage.shared_storage1.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn_0 = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN);
int warp_idx_k_0 = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN);
int warp_idx_m_0 = warp_idx_mn_0 % Base::WarpCount0::kM;
int warp_idx_n_0 = warp_idx_mn_0 / Base::WarpCount0::kM;
int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN);
int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN);
int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM;
int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A0_.add_tile_offset(
{warp_idx_m_0, Base::kWarpGemmIterations0 * warp_idx_k_0});
this->warp_tile_iterator_B0_.add_tile_offset(
{Base::kWarpGemmIterations0 * warp_idx_k_0, warp_idx_n_0});
warp_tile_iterator_A1_.add_tile_offset(
{warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1});
this->warp_tile_iterator_B1_.add_tile_offset(
{Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1});
// Add smem accumulator iterator warp offset
smem_iterator_D0_.add_tile_offset({ warp_idx_m_0 * SmemIteratorD0::TileIterations::kRow,
warp_idx_n_0 * SmemIteratorD0::TileIterations::kColumn});
}
CUTLASS_DEVICE
void copy_tiles_and_advance_0(IteratorA0 &iterator_A0, IteratorB0 &iterator_B0,
int group_start_A0 = 0, int group_start_B0 = 0) {
iterator_A0.set_iteration_index(group_start_A0 *
IteratorA0::kAccessesPerVector);
this->smem_iterator_A0_.set_iteration_index(group_start_A0);
// cp.async for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA0; ++j) {
if (group_start_A0 + j < Detail::TBLoadIterationsA0) {
typename IteratorA0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA0::AccessType *>(
this->smem_iterator_A0_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA0::Element>::value *
IteratorA0::ThreadMap::kElementsPerAccess /
IteratorA0::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA0::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A0.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpA0>(
dst_ptr + v, gmem_ptr, iterator_A0.valid());
++iterator_A0;
}
++this->smem_iterator_A0_;
}
}
iterator_B0.set_iteration_index(group_start_B0 *
IteratorB0::kAccessesPerVector);
this->smem_iterator_B0_.set_iteration_index(group_start_B0);
// cp.async for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB0; ++j) {
if (group_start_B0 + j < Detail::TBLoadIterationsB0) {
typename IteratorB0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB0::AccessType *>(
this->smem_iterator_B0_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value *
IteratorB0::ThreadMap::kElementsPerAccess /
IteratorB0::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B0.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpB0>(
dst_ptr + v, gmem_ptr, iterator_B0.valid());
++iterator_B0;
}
++this->smem_iterator_B0_;
}
}
}
CUTLASS_DEVICE
void copy_tiles_and_advance_1(IteratorB1 &iterator_B1,
int group_start_B1 = 0) {
iterator_B1.set_iteration_index(group_start_B1 *
IteratorB1::kAccessesPerVector);
this->smem_iterator_B1_.set_iteration_index(group_start_B1);
// cp.async for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) {
if (group_start_B1 + j < Detail::TBLoadIterationsB1) {
typename IteratorB1::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType *>(
this->smem_iterator_B1_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value *
IteratorB1::ThreadMap::kElementsPerAccess /
IteratorB1::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B1.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpB1>(
dst_ptr + v, gmem_ptr, iterator_B1.valid());
++iterator_B1;
}
++this->smem_iterator_B1_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations_0,
///< destination accumulator tile
FragmentC1 &accum,
///< iterator over A0 operand in global memory
IteratorA0 iterator_A0,
///< iterator over B0 operand in global memory
IteratorB0 iterator_B0,
///< iterator over A1 operand scale vector in global memory
IteratorAccumulatorScaleBias iterator_accum0_scale,
///< iterator over A1 operand bias vector in global memory
IteratorAccumulatorScaleBias iterator_accum0_bias,
///< iterator over B1 operand in global memory
IteratorB1 iterator_B1,
///< initial value of accumulator
FragmentC0 const &src_accum,
///< epilogue operation after 1st Gemm
OutputOp output_op_0)
{
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations_0) {
iterator_A0.clear_mask(gemm_k_iterations_0 == 0);
iterator_B0.clear_mask(gemm_k_iterations_0 == 0);
iterator_A0.set_iteration_index(0);
this->smem_iterator_A0_.set_iteration_index(0);
// cp.async for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsA0; ++j) {
typename IteratorA0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA0::AccessType *>(
this->smem_iterator_A0_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA0::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA0::Element>::value *
IteratorA0::ThreadMap::kElementsPerAccess /
IteratorA0::kAccessesPerVector / 8;
int src_bytes = (iterator_A0.valid() ? kSrcBytes : 0);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA0>(
dst_ptr + v, iterator_A0.get(), iterator_A0.valid());
++iterator_A0;
}
++this->smem_iterator_A0_;
}
iterator_B0.set_iteration_index(0);
this->smem_iterator_B0_.set_iteration_index(0);
// cp.async for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB0; ++j) {
typename IteratorB0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB0::AccessType *>(
this->smem_iterator_B0_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB0::Element>::value *
IteratorB0::ThreadMap::kElementsPerAccess /
IteratorB0::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB0>(
dst_ptr + v, iterator_B0.get(), iterator_B0.valid());
++iterator_B0;
}
++this->smem_iterator_B0_;
}
// Move to the next stage
iterator_A0.add_tile_offset({0, 1});
iterator_B0.add_tile_offset({1, 0});
this->smem_iterator_A0_.add_tile_offset({0, 1});
this->smem_iterator_B0_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
FragmentC0 accum0 = src_accum;
// DEPBAR+SYNC
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA0 warp_loaded_frag_A0[2];
WarpLoadedFragmentB0 warp_loaded_frag_B0[2];
WarpTransformedFragmentA0 warp_transformed_frag_A0[2];
WarpTransformedFragmentB0 warp_transformed_frag_B0[2];
Operator0 warp_mma0;
this->warp_tile_iterator_A0_.set_kgroup_index(0);
this->warp_tile_iterator_B0_.set_kgroup_index(0);
this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[0]);
this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[0]);
++this->warp_tile_iterator_A0_;
++this->warp_tile_iterator_B0_;
iterator_A0.clear_mask(gemm_k_iterations_0 == 0);
iterator_B0.clear_mask(gemm_k_iterations_0 == 0);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
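// The prologue filled stage slots [0, kStages - 2], so the next global->shared write targets
// slot (kStages - 1) while the warp-level math begins reading from slot 0.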
warp_mma0.transform(warp_transformed_frag_A0[0], warp_transformed_frag_B0[0],
warp_loaded_frag_A0[0], warp_loaded_frag_B0[0]);
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations_0 > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A0_;
++this->warp_tile_iterator_B0_;
if (warp_mma_k > 0)
warp_mma0.transform(warp_transformed_frag_A0[warp_mma_k % 2],
warp_transformed_frag_B0[warp_mma_k % 2],
warp_loaded_frag_A0[warp_mma_k % 2],
warp_loaded_frag_B0[warp_mma_k % 2]);
warp_mma0(
accum0,
warp_transformed_frag_A0[warp_mma_k % 2],
warp_transformed_frag_B0[warp_mma_k % 2],
accum0
);
// Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations0 - 1) {
int group_start_iteration_A0, group_start_iteration_B0;
group_start_iteration_A0 = warp_mma_k * Detail::kAccessesPerGroupA0;
group_start_iteration_B0 = warp_mma_k * Detail::kAccessesPerGroupB0;
copy_tiles_and_advance_0(iterator_A0, iterator_B0, group_start_iteration_A0,
group_start_iteration_B0);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations0) {
int group_start_iteration_A0, group_start_iteration_B0;
group_start_iteration_A0 =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA0;
group_start_iteration_B0 =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB0;
copy_tiles_and_advance_0(iterator_A0, iterator_B0, group_start_iteration_A0,
group_start_iteration_B0);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A0.add_tile_offset({0, 1});
iterator_B0.add_tile_offset({1, 0});
this->smem_iterator_A0_.add_tile_offset({0, 1});
this->smem_iterator_B0_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A0_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A0_.add_tile_offset(
{0, -Base::kStages * Policy0::kPartitionsK *
Base::kWarpGemmIterations0});
this->warp_tile_iterator_B0_.add_tile_offset(
{-Base::kStages * Policy0::kPartitionsK *
Base::kWarpGemmIterations0,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations_0;
iterator_A0.clear_mask(gemm_k_iterations_0 == 0);
iterator_B0.clear_mask(gemm_k_iterations_0 == 0);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations0)
warp_mma0.transform(warp_transformed_frag_A0[(warp_mma_k + 1) % 2],
warp_transformed_frag_B0[(warp_mma_k + 1) % 2],
warp_loaded_frag_A0[(warp_mma_k + 1) % 2],
warp_loaded_frag_B0[(warp_mma_k + 1) % 2]);
}
}
// Insert fence and wait for all outstanding cp.async operations to commit.
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
/// Epilogue for the first Implicit Gemm
Epilogue0 epilogue0;
epilogue0(output_op_0, smem_iterator_D0_, accum0, iterator_accum0_scale, iterator_accum0_bias);
__syncthreads();
// 2nd Gemm
//
// Prologue
//
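// The shared-memory accumulator of the first GEMM serves as the A operand of the second GEMM,
// so GEMM1 iterates over a K extent equal to Shape0::kN in chunks of Shape1::kK.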
int gemm_k_iterations_1 = Shape0::kN / Shape1::kK;
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations_1) {
iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
iterator_B1.set_iteration_index(0);
this->smem_iterator_B1_.set_iteration_index(0);
// cp.async for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) {
typename IteratorB1::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType *>(
this->smem_iterator_B1_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB1::Element>::value *
IteratorB1::ThreadMap::kElementsPerAccess /
IteratorB1::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>(
dst_ptr + v, iterator_B1.get(), iterator_B1.valid());
++iterator_B1;
}
++this->smem_iterator_B1_;
}
// Move to the next stage
iterator_B1.add_tile_offset({1, 0});
this->smem_iterator_B1_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// DEPBAR+SYNC
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA1 warp_loaded_frag_A1[2];
WarpLoadedFragmentB1 warp_loaded_frag_B1[2];
WarpTransformedFragmentA1 warp_transformed_frag_A1[2];
WarpTransformedFragmentB1 warp_transformed_frag_B1[2];
Operator1 warp_mma1;
warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0]);
++warp_tile_iterator_A1_;
this->warp_tile_iterator_B1_.set_kgroup_index(0);
this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[0]);
++this->warp_tile_iterator_B1_;
iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
smem_write_stage_idx = Base::kStages - 1;
smem_read_stage_idx = 0;
warp_mma1.transform(warp_transformed_frag_A1[0], warp_transformed_frag_B1[0],
warp_loaded_frag_A1[0], warp_loaded_frag_B1[0]);
//
// Mainloop
//
CUTLASS_PRAGMA_UNROLL
for ( gemm_k_iterations_1 = Shape0::kN / Shape1::kK - (Base::kStages - 1);
gemm_k_iterations_1 > (-Base::kStages + 1); gemm_k_iterations_1--) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1;
++warp_mma_k) {
// Load warp-level tile from accumulator fragment
// skip warp tile loading for the last kgroup
if(gemm_k_iterations_1 > (-Base::kStages + 2) || warp_mma_k < Base::kWarpGemmIterations1 - 1) {
warp_tile_iterator_A1_.load(warp_loaded_frag_A1[(warp_mma_k + 1) % 2]);
}
++warp_tile_iterator_A1_;
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1);
this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_B1_;
if (warp_mma_k > 0)
warp_mma1.transform(warp_transformed_frag_A1[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
warp_loaded_frag_A1[warp_mma_k % 2],
warp_loaded_frag_B1[warp_mma_k % 2]);
warp_mma1(
accum,
warp_transformed_frag_A1[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
accum
);
// Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations1 - 1) {
int group_start_iteration_B1;
group_start_iteration_B1 = warp_mma_k * Detail::kAccessesPerGroupB1;
copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations1) {
int group_start_iteration_B1;
group_start_iteration_B1 =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB1;
copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_B1.add_tile_offset({1, 0});
this->smem_iterator_B1_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_B1_.add_tile_offset(
{-Base::kStages * Policy1::kPartitionsK *
Base::kWarpGemmIterations1,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
iterator_B1.clear_mask(gemm_k_iterations_1 == 1);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations1)
warp_mma1.transform(warp_transformed_frag_A1[(warp_mma_k + 1) % 2],
warp_transformed_frag_B1[(warp_mma_k + 1) % 2],
warp_loaded_frag_A1[(warp_mma_k + 1) % 2],
warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
}
}
// Commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage_smem_accumulator.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage_smem_accumulator.h",
"repo_id": "examples",
"token_count": 14776
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
  In a normal GEMM, the fast-changing dimension of a matrix always has a stride
  equal to 1, e.g. ColumnMajor and RowMajor matrices. An Affine2 matrix can have
  a stride larger than 1 in both dimensions. To support such a layout, the way
  global memory is visited has to change:
  1. Only one element can be visited at a time because elements are no longer
  stored consecutively. Vectorized load/store is not possible.
  2. One extra multiplication is needed when calculating the global memory
  address
  addr = base_pointer + coord1 * stride1 + coord2 * stride2
  The rest of the GEMM, which includes shared memory load/store and mma
  computation, is the same.
  This example uses an Ampere fp64 tensor core Affine2 GEMM. SIMT kernels
  (e.g. sgemm, dgemm) also support the Affine2 layout.
*/
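// A minimal host-side sketch (added for illustration; the helper below is not part of
// this example) of the Affine2 addressing described above: each element is visited
// individually and its address needs one extra multiply per dimension.
inline double const *affine2_element_ptr(
    double const *base_pointer,
    long long coord1, long long stride1,
    long long coord2, long long stride2) {
  // addr = base_pointer + coord1 * stride1 + coord2 * stride2
  return base_pointer + coord1 * stride1 + coord2 * stride2;
}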
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/kernel/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = double; // Data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation
using ElementInputA = double; // Data type of elements in input tensor
using ElementInputB = double; // Data type of elements in input tensor
using ElementOutput = double; // Data type of elements in output tensor
// Since Affine2 explicitly lists the strides of both dimensions, it does not really matter if
// it is column major or row major. However, it helps CUTLASS to improve the load locality if
// CUTLASS can know which dimension of A/B operand has smaller stride or more dense.
//
// Affine2 ColumnMajor means the row stride is smaller and Affine2 RowMajor means the column
// stride is smaller.
//
// The Affine2 epilogue reuses the AffineN epilogue, so it does not need to specify column major
// or row major.
using LayoutInputA = cutlass::layout::AffineRank2ColumnMajor;
using LayoutInputB = cutlass::layout::AffineRank2RowMajor;
using LayoutOutput = cutlass::layout::AffineRankN<2>;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
    1,                                // The number of elements per memory
                                      // access. It has to be 1 for
                                      // Affine2.
ElementAccumulator,
ElementComputeEpilogue>;
using Gemm = typename cutlass::gemm::device::GemmUniversal<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
1,
1,
cutlass::arch::OpMultiplyAdd
>;
/////////////////////////////////////////////////////////////////////////////////////////////////
int run() {
// Construct Gemm ProblemSize with user defined output size
cutlass::gemm::GemmCoord problem_size = {1024, 512, 1024};
  // Stride factor gives the distance between two elements in the different dimensions. The
  // first value is the logical distance between two rows, the second is between two columns.
// CUTLASS has a utility tool cutlass::layout::Affine2Layout_Factory<Layout>::layout_factory
// to help to convert stride_factor to the two strides.
//
// It is also totally fine to compute the strides directly without using the utility to
// construct the affine2 layout.
typename LayoutInputA::Stride::Index stride_factor_A[] = {3, 4};
typename LayoutInputB::Stride::Index stride_factor_B[] = {5, 6};
typename LayoutOutput::Stride::Index stride_factor_C[] = {7, 8};
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.mk(),
cutlass::layout::Affine2Layout_Factory<LayoutInputA>::layout_factory(problem_size.mk(),
stride_factor_A));
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.kn(),
cutlass::layout::Affine2Layout_Factory<LayoutInputB>::layout_factory(problem_size.kn(),
stride_factor_B));
  // Create matrix C, which is loaded for bias addition.
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(problem_size.mn(),
cutlass::layout::Affine2Layout_Factory<LayoutOutput>::layout_factory(problem_size.mn(),
stride_factor_C));
// Create matrix D used to store output from CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.mn(),
cutlass::layout::Affine2Layout_Factory<LayoutOutput>::layout_factory(problem_size.mn(),
stride_factor_C));
// Create matrix D with dimensions M x N used to store output from reference
// kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.mn(),
cutlass::layout::Affine2Layout_Factory<LayoutOutput>::layout_factory(problem_size.mn(),
stride_factor_C));
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(1);
cutlass::gemm::GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
int batch_count = 1;
  // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
  // the instantiated CUTLASS kernel.
typename Gemm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha, beta},
tensor_a.device_ref().data(), // <- reference to matrix A on device
tensor_b.device_ref().data(), // <- reference to matrix B on device
tensor_c.device_ref().data(), // <- reference to matrix C on device
tensor_d.device_ref().data(), // <- reference to matrix D on device
tensor_a.layout().capacity(problem_size.mk()),
tensor_b.layout().capacity(problem_size.kn()),
tensor_c.layout().capacity(problem_size.mn()),
tensor_d.layout().capacity(problem_size.mn()),
tensor_a.layout().stride(),
tensor_b.layout().stride(),
tensor_c.layout().stride(),
tensor_d.layout().stride()
};
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
  // Check whether the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
//
// Create instantiation for device reference gemm kernel
//
  // Launch device reference gemm kernel to compute D = alpha * A * B + beta * C
cutlass::reference::device::Gemm<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator> gemm_device;
gemm_device
(
problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref()
);
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
bool pass = cutlass::reference::host::TensorEquals(tensor_d.host_view(),
tensor_ref_d.host_view());
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << (pass
? "Passed"
: "Failed")
<< std::endl;
CUTLASS_CHECK(status);
return (pass ? 0 : -1);
}
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
  // CUTLASS must be compiled with the CUDA 11 Toolkit to run this example.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
return run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/18_ampere_fp64_tensorop_affine2_gemm/ampere_fp64_tensorop_affine2_gemm.cu/0 | {
"file_path": "examples/18_ampere_fp64_tensorop_affine2_gemm/ampere_fp64_tensorop_affine2_gemm.cu",
"repo_id": "examples",
"token_count": 4988
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
  This example shows how to compute 2d transposed convolution, also known as deconvolution,
  using CUTLASS conv2d Dgrad kernels. Although the two operations are computationally
  equivalent, some care is needed to correctly set up a problem size for CUTLASS.
  In deep learning, transposed convolution is sometimes used for upscaling feature maps.
  This example demonstrates the 2x upscaling case using the strided Dgrad kernel.
*/
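// A minimal sketch (added for illustration; the helper below is not part of this example)
// of the output-extent arithmetic for transposed convolution, matching the formula used
// in Options::output_size() further down and torch.nn.ConvTranspose2d:
//   out = (in - 1) * stride - 2 * pad + dilation * (kernel - 1) + 1 + output_pad
inline int transposed_conv2d_output_dim(
    int input_dim, int stride, int pad, int kernel, int dilation, int output_pad) {
  return (input_dim - 1) * stride - 2 * pad + dilation * (kernel - 1) + 1 + output_pad;
}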
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_dgrad.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using cutlass::layout::TensorNHWC;
using cutlass::TensorRef;
using ElementAccumulator = cutlass::half_t; // Data type of accumulator
using ElementComputeEpilogue = cutlass::half_t; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = cutlass::half_t; // Data type of elements in output tensor
using ElementC = ElementOutput;
using ElementCompute = ElementComputeEpilogue;
using LayoutInputA = TensorNHWC;
using LayoutInputB = TensorNHWC;
using LayoutOutput = TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementCompute, // Data type of output matrix.
    128 / cutlass::sizeof_bits<ElementCompute>::value,     // The number of elements per vectorized
                                                           // memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv2dDgradKernel = typename cutlass::conv::kernel::DefaultConv2dDgrad<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementAccumulator, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm,
cutlass::conv::StrideSupport::kStrided // Use the strided Dgrad specialization
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dDgradKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 16),
padding(1, 1, 1, 1),
conv_stride(2, 2),
dilation(1, 1),
reference_check(true),
measure_performance(false),
iterations(20),
alpha(1),
beta(0) {}
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("skip-ref-check")) {
reference_check = false;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
// Filter layout is CRSK
cmd.get_cmd_line_argument("k", filter_size.c());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.n() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "34_transposed_conv2d example\n\n"
<< " This example shows how to compute 2d transposed convolution, also known as\n"
<< " deconvolution, using CUTLASS conv2d Dgrad kernels. Although two operations are\n"
<< " computationaly equivalent, some care is needed to correctly set up a problem size.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --skip-ref-check If set (true), skip reference check on the host\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/34_transposed_conv2d/34_transposed_conv2d --n=8 --h=32 --w=32 --c=16 --k=32 --r=3 --s=3\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
// Here, out_pad corresponds to "output_padding" of conv2d_transpose op in deep learning frameworks.
// See for example https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html
int out_pad_h = conv_stride.row() > 1 ? 1 : 0;
int out_pad_w = conv_stride.column() > 1 ? 1 : 0;
int out_h = (input_size.h() - 1) * conv_stride.row() - 2 * padding.n() + (((filter_size.h() - 1) * dilation.row() + 1)) + out_pad_h;
int out_w = (input_size.w() - 1) * conv_stride.column() - 2 * padding.w() + (((filter_size.w() - 1) * dilation.column() + 1)) + out_pad_w;
return cutlass::Tensor4DCoord(input_size.n(), out_h, out_w, filter_size.c());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NHWC * KRS
// Note that the input with the layout NHWC corresponds to the output from the perspective of dgrad,
// and that the filter layout is CRSK.
int64_t fmas = input_size.product() * int64_t(filter_size.h() * filter_size.w() * filter_size.n());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Stride_H,Stride_W,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.c() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.conv_stride.row() << ","
<< options.conv_stride.column() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
// This is the same as Conv2dDgrad in tools/util/include/cutlass/util/reference/host/convolution.h,
// only variable names have been adapted for transposed conv2d.
void Conv2dTransposeReference(
cutlass::conv::Conv2dProblemSize problem_size,
TensorRef<ElementInputA, LayoutInputA> tensor_a,
TensorRef<ElementInputB, LayoutInputB> tensor_b,
TensorRef<ElementC, LayoutOutput> tensor_c,
TensorRef<ElementC, LayoutOutput> tensor_d,
ElementCompute alpha,
ElementCompute beta) {
int H = problem_size.P;
int W = problem_size.Q;
int P = problem_size.H;
int Q = problem_size.W;
int K = problem_size.C;
int C = problem_size.K;
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < P; ++p) {
for (int q = 0; q < Q; ++q) {
for (int k = 0; k < K; ++k) {
ElementAccumulator acc = ElementAccumulator();
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < C; ++c) {
int filter_r = r;
int filter_s = s;
int h = p + problem_size.pad_h - filter_r * problem_size.dilation_h;
int w = q + problem_size.pad_w - filter_s * problem_size.dilation_w;
if (h >= 0 && (h % problem_size.stride_h) == 0 &&
w >= 0 && (w % problem_size.stride_w) == 0) {
h = h / problem_size.stride_h;
w = w / problem_size.stride_w;
if (h < H && w < W) {
ElementInputA a = tensor_a.at(cutlass::make_Coord(n, h, w, c));
ElementInputB b = tensor_b.at(cutlass::make_Coord(c, r, s, k));
acc += ElementAccumulator(a) * ElementAccumulator(b);
}
}
} // for (C)
} // for (S)
} // for (R)
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_c.at(cutlass::make_Coord(n, p, q, k));
}
tensor_d.at(cutlass::make_Coord(n, p, q, k)) = alpha * ElementCompute(acc) + beta * ElementCompute(c_ref);
} // for (K)
} // for (W)
} // for (H)
} // for (N)
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
std::cout << "Output shape: " << options.output_size() << std::endl;
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_a(options.input_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_b(options.filter_size);
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_c(options.output_size());
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_d(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C and D on host with zeros
cutlass::reference::host::TensorFill(tensor_c.host_view());
cutlass::reference::host::TensorFill(tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Construct Conv2dProblemSize with user defined output size
// The input in transposed conv2d corresponds to the output in the equivalent dgrad.
// Similarly for the output.
// Although the filter layout is CRSK from the perspective of conv2d transpose,
// the filter size does not need to change for setting up the problem size.
// There is no need to transpose the filter tensor either.
cutlass::conv::Conv2dProblemSize problem_size(
options.output_size(),
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.input_size,
mode
);
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta}
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm implicit_gemm;
size_t workspace_size = implicit_gemm.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
result.status = implicit_gemm();
CUTLASS_CHECK(result.status);
  // Check the CUTLASS result on the host against the transposed conv2d reference defined above.
if (options.reference_check) {
tensor_d.sync_host();
std::cout << "Verification on host...\n";
Conv2dTransposeReference(problem_size,
tensor_a.host_ref(),
tensor_b.host_ref(),
tensor_c.host_ref(),
tensor_ref_d.host_ref(),
options.alpha, options.beta);
bool passed = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
  // CUTLASS must be compiled with the CUDA 11 Toolkit to run this example.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/34_transposed_conv2d/34_transposed_conv2d.cu/0 | {
"file_path": "examples/34_transposed_conv2d/34_transposed_conv2d.cu",
"repo_id": "examples",
"token_count": 8367
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines additional layout functions used in Permute GEMM example to simplify
computing reference permutations of 4/5D tensors when source data is column-major.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include "assert.h"
#endif
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/coord.h"
#include "cutlass/tensor_coord.h"
namespace cutlass {
namespace layout {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D CWHN tensors.
class TensorCWHN {
public:
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate (n, h, w, c)
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [n, hn, whn]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorCWHN(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorCWHN(
    typename Stride::Index stride_h,    ///< number of elements between adjacent H coordinates
    typename Stride::Index stride_w,    ///< number of elements between adjacent W coordinates
    typename Stride::Index stride_c     ///< number of elements between adjacent C coordinates
):
stride_(make_Coord(stride_h, stride_w, stride_c)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorCWHN(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
  /// Helper returns a layout to a tightly packed CWHN tensor.
CUTLASS_HOST_DEVICE
static TensorCWHN packed(TensorCoord const &extent) {
return TensorCWHN(
make_Coord(
extent.n(),
extent.h() * extent.n(),
extent.w() * extent.h() * extent.n()
)
);
}
/// Returns the offset of a coordinate (n, h, w, c) in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.n() +
LongIndex(stride_[0] * coord.h()) +
LongIndex(stride_[1] * coord.w()) +
LongIndex(stride_[2] * coord.c());
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return coord.contiguous() + LongIndex(coord.strided() * stride_[2]);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
    // It does not make sense for the extent to be larger than the stride,
    // and we cannot rely on the capacity calculation in such cases.
    // These checks could be moved to debug-only code.
if ((extent.n() > stride_[0])
|| (extent.h() * stride_[0] > stride_[1])
|| (extent.w() * stride_[1] > stride_[2])) {
assert(0);
}
return extent.c() * stride_[2];
}
};
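/// A minimal usage sketch (added for illustration; not part of the original header).
/// It builds a packed CWHN layout for a small, assumed extent and returns the linear
/// offset of one coordinate, demonstrating the (n, hn, whn) stride ordering above.
CUTLASS_HOST_DEVICE
int64_t example_cwhn_offset() {
  TensorCWHN layout = TensorCWHN::packed(Tensor4DCoord(2, 4, 4, 8));  // N=2, H=4, W=4, C=8
  // Packed strides are (2, 8, 32); offset = n + 2*h + 8*w + 32*c = 1 + 4 + 24 + 160 = 189.
  return layout(Tensor4DCoord(1, 2, 3, 5));
}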
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D NHCW tensors.
class TensorNHCW {
public:
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate (n, h, w, c)
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [w, cw, hcw]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNHCW(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNHCW(
typename Stride::Index stride_c, ///< number of elements between adjacent C coordinates
typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_c, stride_h, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNHCW(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
  /// Helper returns a layout to a tightly packed NHCW tensor.
CUTLASS_HOST_DEVICE
static TensorNHCW packed(TensorCoord const &extent) {
return TensorNHCW(
make_Coord(
extent.w(),
extent.c() * extent.w(),
extent.h() * extent.c() * extent.w()
)
);
}
/// Returns the offset of a coordinate (n, h, w, c) in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.w() +
LongIndex(stride_[0] * coord.c()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return coord.contiguous() + LongIndex(coord.strided() * stride_[2]);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
    // It does not make sense for the extent to be larger than the stride,
    // and we cannot rely on the capacity calculation in such cases.
    // These checks could be moved to debug-only code.
if ((extent.w() > stride_[0])
|| (extent.c() * stride_[0] > stride_[1])
|| (extent.h() * stride_[1] > stride_[2])) {
assert(0);
}
return extent.n() * stride_[2];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D NCWH tensors.
class TensorNCWH {
public:
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate (n, h, w, c)
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [h, wh, cwh]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCWH(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCWH(
    typename Stride::Index stride_w,    ///< number of elements between adjacent W coordinates
    typename Stride::Index stride_c,    ///< number of elements between adjacent C coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_w, stride_c, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNCWH(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
  /// Helper returns a layout to a tightly packed NCWH tensor.
CUTLASS_HOST_DEVICE
static TensorNCWH packed(TensorCoord const &extent) {
return TensorNCWH(
make_Coord(
extent.h(),
extent.w() * extent.h(),
extent.c() * extent.w() * extent.h()
)
);
}
/// Returns the offset of a coordinate (n, h, w, c) in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.h() +
LongIndex(stride_[0] * coord.w()) +
LongIndex(stride_[1] * coord.c()) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return coord.contiguous() + LongIndex(coord.strided() * stride_[2]);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
    // It does not make sense for the extent to be larger than the stride,
    // and we cannot rely on the capacity calculation in such cases.
    // These checks could be moved to debug-only code.
if ((extent.h() > stride_[0])
|| (extent.w() * stride_[0] > stride_[1])
|| (extent.c() * stride_[1] > stride_[2])) {
assert(0);
}
return extent.n() * stride_[2];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 5-D CWHDN tensors.
class TensorCWHDN {
public:
/// Logical rank of tensor
static int const kRank = 5;
/// Rank of stride vector
static int const kStrideRank = 4;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate (n, d, h, w, c)
using TensorCoord = Tensor5DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [n, dn, hdn, whdn]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorCWHDN(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorCWHDN(
typename Stride::Index n,
typename Stride::Index dn,
typename Stride::Index hdn,
typename Stride::Index whdn):
stride_(make_Coord(n, dn, hdn, whdn)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorCWHDN(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]),
static_cast<typename Stride::Index>(stride[3]))
) { }
/// Helper returns a layout to a tightly packed CWHDN tensor.
CUTLASS_HOST_DEVICE
static TensorCWHDN packed(TensorCoord const &extent) {
return TensorCWHDN(
make_Coord(
extent.n(),
extent.d() * extent.n(),
extent.h() * extent.d() * extent.n(),
extent.w() * extent.h() * extent.d() * extent.n()
)
);
}
/// Returns the offset of a coordinate (n, d, h, w, c) in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.n() +
LongIndex(stride_[0] * coord.d()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * coord.w()) +
LongIndex(stride_[3] * coord.c());
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return coord.contiguous() + LongIndex(coord.strided() * stride_[3]);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
    // It does not make sense for the extent to be larger than the stride,
    // and we cannot rely on the capacity calculation in such cases.
    // These checks could be moved to debug-only code.
if ((extent.n() > stride_[0])
|| (extent.d() * stride_[0] > stride_[1])
|| (extent.h() * stride_[1] > stride_[2])
|| (extent.w() * stride_[2] > stride_[3])) {
assert(0);
}
return extent.c() * stride_[3];
}
};
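/// A minimal usage sketch (added for illustration; not part of the original header)
/// extending the rank-4 example above to rank 5: packed CWHDN strides are
/// (n, dn, hdn, whdn).
CUTLASS_HOST_DEVICE
int64_t example_cwhdn_offset() {
  TensorCWHDN layout = TensorCWHDN::packed(Tensor5DCoord(2, 3, 4, 4, 8));  // N=2, D=3, H=4, W=4, C=8
  // Packed strides are (2, 6, 24, 96);
  // offset = n + 2*d + 6*h + 24*w + 96*c = 1 + 4 + 18 + 48 + 384 = 455.
  return layout(Tensor5DCoord(1, 2, 3, 2, 4));
}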
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
| examples/39_gemm_permute/layouts.h/0 | {
"file_path": "examples/39_gemm_permute/layouts.h",
"repo_id": "examples",
"token_count": 5404
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations used by epilogues.
*/
#pragma once
#include <cuda_fp16.h>
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element, int ElementsPerAccess>
struct ArrayExponential {
CUTLASS_HOST_DEVICE
Array<Element, ElementsPerAccess> operator()(
Array<Element, ElementsPerAccess> const& input) const {
Array<Element, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = expf(input[i]);
}
return result;
}
};
template <int ElementsPerAccess>
struct ArrayExponential<half_t, ElementsPerAccess> {
CUTLASS_DEVICE
Array<half_t, ElementsPerAccess> operator()(
Array<half_t, ElementsPerAccess> const& input) const {
Array<half_t, ElementsPerAccess> result;
int const kVectorCount = ElementsPerAccess / 2;
__half2 const* input_ptr =
reinterpret_cast<__half2 const*>(input.raw_data());
__half2* res_ptr = reinterpret_cast<__half2*>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = h2exp(input_ptr[i]);
}
return result;
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
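// A minimal scalar sketch (added for illustration; not part of the original functor) of
// the per-element operation that ApplyLogSumExp below performs on whole fragments:
// subtracting the log-sum-exp and exponentiating turns a raw matmul score into a
// normalized softmax probability. The helper name is an assumption for this sketch.
CUTLASS_HOST_DEVICE
float apply_log_sum_exp_scalar(float score, float lse) {
  return expf(score - lse);  // exp(score) / exp(lse)
}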
/// Applies:
/// output <- (input - lse).exp()
template <
typename ElementOutput_, // output
typename ElementLSE_, // accumulator from LSE
typename ElementAccumulator_, // accumulator from matmul
typename ElementCompute_, // intermediate compute (and exp calculation)
int ElementsPerAccess>
class ApplyLogSumExp {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementLSE = ElementLSE_;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kCount = kElementsPerAccess;
static const ScaleType::Kind kScale =
cutlass::epilogue::thread::ScaleType::NoBetaScaling;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentLSE = Array<ElementLSE, kElementsPerAccess>;
using FragmentScaleBias = FragmentLSE; // Used by epilogue_smem_accumulator.h
public:
//
// Methods
//
CUTLASS_HOST_DEVICE
ApplyLogSumExp() {}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return true;
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {}
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const& AB,
FragmentLSE const& scale_unused,
// bias used as LSE
FragmentLSE const& bias) const {
FragmentCompute frag_AB = NumericArrayConverter<
ElementCompute,
ElementAccumulator,
kElementsPerAccess>()(AB);
FragmentCompute frag_lse_compute =
NumericArrayConverter<ElementCompute, ElementLSE, kElementsPerAccess>()(
bias);
FragmentCompute frag_compute;
minus<FragmentCompute> minus_lse;
detail::ArrayExponential<ElementCompute, kElementsPerAccess> apply_exp;
frag_compute = minus_lse(frag_AB, frag_lse_compute);
frag_compute = apply_exp(frag_compute);
return NumericArrayConverter<
ElementOutput,
ElementCompute,
kElementsPerAccess>()(frag_compute);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/epilogue/epilogue_thread_apply_logsumexp.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/epilogue/epilogue_thread_apply_logsumexp.h",
"repo_id": "examples",
"token_count": 1874
} | 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue iterator that supports prefetching
Mostly copied from "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
*/
#pragma once
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
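// A minimal standalone sketch (added for illustration; the wrapper name and signature are
// assumptions, not part of this header) of the PTX prefetch used by
// PredicatedTileIteratorPrefetch::prefetch() below: it hints the L1 cache to fetch the
// global-memory line containing 'ptr' without producing any value.
CUTLASS_DEVICE
void prefetch_global_l1(void const* ptr) {
  uint64_t addr = reinterpret_cast<uint64_t>(ptr);
  asm volatile("prefetch.global.L1 [ %0 ];" :: "l"(addr));
}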
/// Tile iterator used to load and store output tile from global memory in
/// epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator |
/// ForwardTileIterator
///
template <
    typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
bool ScatterD = false, ///< Scatter D operand or not
bool UseCUDAStore = false>
class PredicatedTileIteratorPrefetch {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert(
ThreadMap::Iterations::kRow > 0,
"ThreadMap::Iterations::kRow must be > 0");
static_assert(
ThreadMap::Iterations::kGroup > 0,
"ThreadMap::Iterations::kGroup must be > 0");
static_assert(
ThreadMap::Iterations::kCluster > 0,
"ThreadMap::Iterations::kCluster must be > 0");
static_assert(
ThreadMap::Iterations::kColumn > 0,
"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
using Base = PredicatedTileIteratorParams;
CUTLASS_HOST_DEVICE
Params() {}
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()) {}
CUTLASS_HOST_DEVICE
Params(Base const& base) : Base(base) {}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorParams params_;
/// Byte-level pointer
uint8_t* byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
  /// Extent of the matrix tile in columns
Index extent_column_;
/// A thread's starting row position (assuming steady-state predicates have
/// been computed)
Index thread_start_row_;
/// A thread's starting column
Index thread_start_column_;
/// Internal state counter
int state_[3];
/// Scatter indices
int const* indices_;
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(
sizeof(PredicatedTileIteratorParams::stride) == 8,
"Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorPrefetch(
PredicatedTileIteratorParams const& params,
Element* pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord(),
int const* indices = nullptr)
: params_(params), indices_(indices) {
TensorCoord thread_offset =
ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_row_ = extent.row();
extent_column_ = extent.column();
thread_start_row_ = thread_offset.row();
thread_start_column_ = thread_offset.column();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] =
((thread_offset.column() + ThreadMap::Delta::kColumn * c) <
extent.column());
}
// Null pointer performs no accesses
if (!pointer) {
mask_.clear();
}
if (ScatterD && !indices) {
mask_.clear();
}
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
LongIndex(thread_offset.row()) * LongIndex(params_.stride) +
LongIndex(thread_offset.column()) * sizeof(AccessType) /
kElementsPerAccess;
if (ScatterD) {
byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
LongIndex(thread_offset.column()) * sizeof(AccessType) /
kElementsPerAccess;
}
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_DEVICE
void prefetch_all() {
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < kIterations; ++iter) {
prefetch();
++(*this);
}
}
CUTLASS_DEVICE
void prefetch() {
uint8_t* byte_pointer = byte_pointer_;
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
AccessType* memory_pointer =
reinterpret_cast<AccessType*>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
            // On Windows, using `unsigned long` here gives the error:
            //   asm operand type size(4) does not match
            //   type/size implied by constraint 'l'
uint64_t addr = (uint64_t)((void*)&memory_pointer
[column * ThreadMap::Delta::kColumn /
kElementsPerAccess]);
asm volatile("prefetch.global.L1 [ %1 ];" : "=l"(addr) : "l"(addr));
}
if (row + 1 < ThreadMap::Iterations::kRow) {
if (!ScatterD) {
byte_pointer += params_.increment_row;
}
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment& frag, int64_t byte_offset) const {
uint8_t* byte_pointer = byte_pointer_;
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row +
ThreadMap::Iterations::kRow *
(group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType* memory_pointer =
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
if (ScatterD && row_guard) {
assert(indices_);
memory_pointer = reinterpret_cast<AccessType*>(
byte_pointer + byte_offset +
LongIndex(indices_[row_offset + thread_start_row_]) *
LongIndex(params_.stride));
}
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
frag_ptr
[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void*)&memory_pointer
[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
if (!ScatterD) {
byte_pointer += params_.increment_row;
}
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment& frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) const {
uint8_t* byte_pointer = byte_pointer_;
AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row +
ThreadMap::Iterations::kRow *
(group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType* memory_pointer =
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
if (ScatterD && row_guard) {
assert(indices_);
memory_pointer = reinterpret_cast<AccessType*>(
byte_pointer + byte_offset +
LongIndex(indices_[row_offset + thread_start_row_]) *
LongIndex(params_.stride));
}
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
bool guard = row_guard && mask_.predicates[column];
if (UseCUDAStore) {
if (guard) {
memory_pointer
[column * ThreadMap::Delta::kColumn / kElementsPerAccess] =
frag_ptr
[frag_row_idx * ThreadMap::Iterations::kColumn +
column];
}
} else {
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr
[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void*)&memory_pointer
[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
}
if (row + 1 < ThreadMap::Iterations::kRow) {
if (!ScatterD) {
byte_pointer += params_.increment_row;
}
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const& frag) const {
store_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void downsample_load_with_byte_offset(
Fragment& frag,
int64_t byte_offset,
int convolution_P,
int convolution_Q,
int add_P,
int add_Q,
int problem_N) const {
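    // Added note: this variant gathers rows from a source feature map of size
    // (2*convolution_P) x (2*convolution_Q): for output pixel (p, q) it reads
    // source pixel (2*p + add_P, 2*q + add_Q), i.e. a stride-2 downsample is
    // performed during the load. The per-row byte_offset computed in the loop
    // below shadows the function's byte_offset parameter.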
uint8_t* byte_pointer = byte_pointer_;
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row +
ThreadMap::Iterations::kRow *
(group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
int output_row = row_offset + thread_start_row_;
int output_N = output_row / (convolution_P * convolution_Q);
int output_PQ = output_row % (convolution_P * convolution_Q);
int output_P = output_PQ / convolution_Q;
int output_Q = output_PQ % convolution_Q;
int input_row = output_N * 2 * convolution_P * 2 * convolution_Q +
(2 * output_P + add_P) * 2 * convolution_Q + 2 * output_Q + add_Q;
int64_t byte_offset =
(input_row - output_row) * problem_N * sizeof(float);
AccessType* memory_pointer =
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
frag_ptr
[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void*)&memory_pointer
[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void upsample_load_with_byte_offset(
Fragment& frag,
int64_t byte_offset,
int convolution_P,
int convolution_Q,
int add_P,
int add_Q,
int problem_N) const {
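    // Added note: this variant gathers rows from a source feature map of size
    // (convolution_P/2) x (convolution_Q/2): for output pixel (p, q) it reads
    // source pixel ((p + add_P)/2, (q + add_Q)/2) (the add_P/add_Q phase is
    // dropped at the last row/column), i.e. a 2x upsample is performed during
    // the load. As above, the per-row byte_offset shadows the parameter.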
uint8_t* byte_pointer = byte_pointer_;
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row +
ThreadMap::Iterations::kRow *
(group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
int output_row = row_offset + thread_start_row_;
int output_N = output_row / (convolution_P * convolution_Q);
int output_PQ = output_row % (convolution_P * convolution_Q);
int output_P = output_PQ / convolution_Q;
int output_Q = output_PQ % convolution_Q;
int row_add_P = add_P;
int row_add_Q = add_Q;
if (output_P > convolution_P - 2)
row_add_P = 0;
if (output_Q > convolution_Q - 2)
row_add_Q = 0;
int input_row = output_N * (convolution_P / 2) * (convolution_Q / 2) +
((output_P + row_add_P) / 2) * (convolution_Q / 2) +
(output_Q + row_add_Q) / 2;
int64_t byte_offset =
(input_row - output_row) * problem_N * sizeof(float);
AccessType* memory_pointer =
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
frag_ptr
[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void*)&memory_pointer
[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
CUTLASS_DEVICE
MatrixCoord thread_start() const {
return MatrixCoord(thread_start_row_, thread_start_column_);
}
/// Need to get the thread start row from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_row() const {
return thread_start_row_;
}
/// Need to get the thread start row from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_column() const {
return thread_start_column_;
}
/// Extent of the matrix in rows
CUTLASS_DEVICE
Index extent_row() const {
return extent_row_;
}
/// Extent of the matrix in columns
CUTLASS_DEVICE
Index extent_column() const {
return extent_column_;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorPrefetch& operator++() {
++state_[0];
if (!ScatterD) {
byte_pointer_ += params_.advance_row;
}
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
byte_pointer_ += params_.advance_group;
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
byte_pointer_ += params_.advance_cluster;
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow *
ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
byte_pointer_ += params_.advance_tile;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask& mask) const {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const& mask) {
mask_ = mask;
}
};
template <typename IT>
struct MakePrefetchableIterator {
using Iterator = PredicatedTileIteratorPrefetch<
typename IT::ThreadMap,
typename IT::Element>;
};
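////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (added for exposition; not part of the original
// header). The half_t element type and the generic ThreadMap parameter are
// assumptions made only for this example. MakePrefetchableIterator<IT>::Iterator
// above rebinds an existing iterator's ThreadMap/Element to this variant.
template <typename ThreadMap>
CUTLASS_DEVICE
void example_prefetch_then_store(
    PredicatedTileIteratorParams const& params, ///< precomputed strides and increments
    half_t* pointer,                            ///< base pointer of the output tensor
    MatrixCoord extent,                         ///< extent of the output in rows/columns
    int thread_idx) {                           ///< linear thread index within the CTA
  using Iterator = PredicatedTileIteratorPrefetch<ThreadMap, half_t>;
  // prefetch_all() advances the iterator's internal state, so a dedicated
  // instance is used to prime the L1 cache for every tile position.
  Iterator prefetch_iterator(params, pointer, extent, thread_idx);
  prefetch_iterator.prefetch_all();
  // A second instance performs the guarded stores, one tile per iteration.
  Iterator store_iterator(params, pointer, extent, thread_idx);
  typename Iterator::Fragment frag;
  frag.clear();
  CUTLASS_PRAGMA_UNROLL
  for (int iter = 0; iter < Iterator::kIterations; ++iter) {
    store_iterator.store(frag);
    ++store_iterator;
  }
}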
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/iterators/epilogue_predicated_tile_iterator.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/iterators/epilogue_predicated_tile_iterator.h",
"repo_id": "examples",
"token_count": 10089
} | 9 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator_mixed.h"
// #include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/interleaved_epilogue.h"
#include "fused_bias_act_epilogue.h"
#include "../warp/fused_bias_act_fragment_iterator_tensor_op.h"
#include "output_tile_thread_map_for_fused_bias.h"
#include "default_thread_map_tensor_op_for_fused_bias.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultFusedBiasActEpilogueTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOpForFusedBias<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = typename std::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FusedBiasActFragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::FusedBiasActEpilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
OutputOp
>;
};
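////////////////////////////////////////////////////////////////////////////////
// Illustrative instantiation sketch (added for exposition; not part of the
// original header). The threadblock tile, the 8-wide access, the half_t/float
// element choices, and the WarpMmaTensorOp template parameter are assumptions
// made only to show how the default above is typically stitched together.
template <typename WarpMmaTensorOp>
struct ExampleFusedBiasActEpilogue {
  // A conventional linear-combination output op: D = alpha * accum + beta * C
  using OutputOp = cutlass::epilogue::thread::LinearCombination<
      cutlass::half_t, // element type of the output
      8,               // elements per vectorized access
      float,           // accumulator element type
      float>;          // compute element type
  // The fused bias-activation epilogue assembled by the default above
  using Type = typename DefaultFusedBiasActEpilogueTensorOp<
      cutlass::gemm::GemmShape<128, 128, 32>, // threadblock-level tile
      WarpMmaTensorOp,                        // warp-level tensor-op MMA
      1,                                      // partitions along K
      OutputOp,
      8                                       // elements per access
      >::Epilogue;
};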
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/default_bias_act_epilogue_tensor_op.h/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/default_bias_act_epilogue_tensor_op.h",
"repo_id": "examples",
"token_count": 2215
} | 10 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Hopper Grouped GEMM example using CUTLASS 3 APIs for NVIDIA Hopper architecture.
This example demonstrates an implementation of Grouped GEMM using a TMA + GMMA
warp-specialized cooperative kernel.
For this example all scheduling work is performed on the device.
The new feature showcased in this example is on-the-fly modification of TMA descriptors
to move between groups/problem_count (represented by groups).
To run this example:
$ ./examples/57_hopper_grouped_gemm/57_hopper_grouped_gemm --m=2048 --n=2048 --k=2048 --groups=10
The above example command makes all 10 groups to be sized at the given m, n, k sizes.
Skipping any of the problem dimensions randomizes it across the different groups.
Same applies for alpha and beta values that are randomized across the different groups.
To run this example for a set of problems using the benchmark option:
$ ./examples/57_hopper_grouped_gemm/57_hopper_grouped_gemm --benchmark=./test_benchmark.txt
Where the test_benchmark.txt may look as such:
0 256x512x128
1 256x512x512
2 512x256x128
3 256x256x128
4 256x512x1024
      5 1024x512x128
      ... and so on
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <float.h>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/group_array_problem_shape.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "helper.h"
using namespace cute;
using ProblemShape = cutlass::gemm::GroupProblemShape<Shape<int,int,int>>; // <M,N,K> per group
using ElementA = cutlass::float_e4m3_t; // Element type for A matrix operand
using ElementB = cutlass::float_e5m2_t; // Element type for B matrix operand
using ElementC = cutlass::half_t; // Element type for C and D matrix operands
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM kernel configurations
/////////////////////////////////////////////////////////////////////////////////////////////////
// A matrix configuration
using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand
constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Alignment of A matrix in units of elements (up to 16 bytes)
// B matrix configuration
using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand
constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Alignment of B matrix in units of elements (up to 16 bytes)
// C/D matrix configuration
using LayoutC = cutlass::layout::ColumnMajor; // Layout type for C and D matrix operands
constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Alignment of C matrix in units of elements (up to 16 bytes)
// Core kernel configurations
using ElementAccumulator = float; // Element type for internal accumulation
using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature
using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag
using TileShape = Shape<_256,_128,_64>; // Threadblock-level tile size
using ClusterShape = Shape<_2,_2,_1>; // Shape of the threadblocks in a cluster
using StageCountType = cutlass::gemm::collective::StageCountAuto; // Stage count maximized based on the tile size
using KernelSchedule = cutlass::gemm::KernelPtrArrayTmaWarpSpecializedCooperativeFP8FastAccum; // Kernel to launch
using EpilogueSchedule = cutlass::epilogue::PtrArrayNoSmemWarpSpecialized; // Epilogue to launch
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape, ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementAccumulator,
ElementC, LayoutC *, AlignmentC,
ElementC, LayoutC *, AlignmentC,
EpilogueSchedule
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
ArchTag, OperatorClass,
ElementA, LayoutA *, AlignmentA,
ElementB, LayoutB *, AlignmentB,
ElementAccumulator,
TileShape, ClusterShape,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
KernelSchedule
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Reference device GEMM implementation type
using DeviceGemmReference = cutlass::reference::device::Gemm<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
ElementAccumulator>;
using StrideA = typename Gemm::GemmKernel::UnderlyingStrideA;
using StrideB = typename Gemm::GemmKernel::UnderlyingStrideB;
using StrideC = typename Gemm::GemmKernel::UnderlyingStrideC;
using StrideD = typename Gemm::GemmKernel::UnderlyingStrideD;
// Host-side allocations
std::vector<int64_t> offset_A;
std::vector<int64_t> offset_B;
std::vector<int64_t> offset_C;
std::vector<int64_t> offset_D;
std::vector<StrideA> stride_A_host;
std::vector<StrideB> stride_B_host;
std::vector<StrideC> stride_C_host;
std::vector<StrideD> stride_D_host;
std::vector<ElementAccumulator> alpha_host;
std::vector<ElementAccumulator> beta_host;
// Device-side allocations
cutlass::DeviceAllocation<typename ProblemShape::UnderlyingProblemShape> problem_sizes;
cutlass::DeviceAllocation<typename Gemm::ElementA> block_A;
cutlass::DeviceAllocation<typename Gemm::ElementB> block_B;
cutlass::DeviceAllocation<typename Gemm::ElementC> block_C;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_D;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_ref_D;
cutlass::DeviceAllocation<const typename Gemm::ElementA *> ptr_A;
cutlass::DeviceAllocation<const typename Gemm::ElementB *> ptr_B;
cutlass::DeviceAllocation<const typename Gemm::ElementC *> ptr_C;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput *> ptr_D;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput *> ptr_ref_D;
cutlass::DeviceAllocation<StrideA> stride_A;
cutlass::DeviceAllocation<StrideB> stride_B;
cutlass::DeviceAllocation<StrideC> stride_C;
cutlass::DeviceAllocation<StrideD> stride_D;
// Note, this is an array of pointers to alpha and beta scaling values per group
cutlass::DeviceAllocation<ElementAccumulator*> alpha_device;
cutlass::DeviceAllocation<ElementAccumulator*> beta_device;
cutlass::DeviceAllocation<ElementAccumulator> block_alpha;
cutlass::DeviceAllocation<ElementAccumulator> block_beta;
#endif // defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Testbed utility types
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help = false;
float alpha = FLT_MAX;
float beta = FLT_MAX;
int iterations = 10;
int m = 1024, n = 2048, k = 512, groups = 10;
std::string benchmark_path;
std::vector<typename ProblemShape::UnderlyingProblemShape> problem_sizes_host;
int const tma_alignment_bits = 128;
int const alignment = tma_alignment_bits / cutlass::sizeof_bits<ElementA>::value;
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("m", m);
cmd.get_cmd_line_argument("n", n);
cmd.get_cmd_line_argument("k", k);
cmd.get_cmd_line_argument("groups", groups);
cmd.get_cmd_line_argument("alpha", alpha, FLT_MAX);
cmd.get_cmd_line_argument("beta", beta, FLT_MAX);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("benchmark", benchmark_path);
// Decide how to initialize the problems
if (!benchmark_path.empty()) {
if (!benchmark_problems()) {
problem_sizes_host.clear();
return;
}
}
else {
randomize_problems(cmd);
}
}
void randomize_problems(cutlass::CommandLine &cmd) {
int cmd_line_m = -1, cmd_line_n = -1, cmd_line_k = -1;
cmd.get_cmd_line_argument("m", cmd_line_m);
cmd.get_cmd_line_argument("n", cmd_line_n);
cmd.get_cmd_line_argument("k", cmd_line_k);
problem_sizes_host.reserve(groups);
for (int i = groups; i > 0; i--) {
int m = cmd_line_m;
int n = cmd_line_n;
int k = cmd_line_k;
if (m < 1) {
m = ((rand() % 512) + 1);
}
if (n < 1) {
n = ((rand() % 512) + 1);
}
if (k < 1) {
k = alignment * ((rand() % 64) + 1);
}
problem_sizes_host.push_back({m, n, k});
}
}
/// Load a benchmark
bool benchmark_problems() {
std::ifstream file(benchmark_path);
if (!file.good()) {
return false;
}
while (file.good()) {
int idx = -1;
std::string extent_str;
file >> idx >> extent_str;
if (idx < 0 || extent_str.empty()) {
break;
}
cutlass::gemm::GemmCoord extent;
std::vector<std::string> tokens;
cutlass::CommandLine::tokenize(tokens, extent_str, 'x');
for (int i = 0; i < int(tokens.size()); ++i) {
int x = std::atoi(tokens.at(i).c_str());
// round up
if (x % alignment) {
x += (alignment - (x % alignment));
}
extent.at(i) = x;
}
if (extent.product()) {
problem_sizes_host.push_back({extent.m(), extent.n(), extent.k()});
}
}
groups = static_cast<int>(problem_sizes_host.size());
return true;
}
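  // Added note on benchmark_problems(): with ElementA = float_e4m3_t the
  // alignment works out to 128 / 8 = 16 elements, so a benchmark line such as
  // "0 250x500x100" is rounded up to 256x512x112 before being recorded in
  // problem_sizes_host.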
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "57_hopper_grouped_gemm\n\n"
<< " Hopper FP8 Grouped GEMM using a Warp Specialized kernel.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement\n\n"
<< " --m=<int> Sets the M extent of the GEMM for all groups\n"
<< " --n=<int> Sets the N extent of the GEMM for all groups\n"
<< " --k=<int> Sets the K extent of the GEMM for all groups\n"
<< " --groups=<int> Sets the number of individual GEMM problems for Grouped GEMM\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --iterations=<int> Number of profiling iterations to perform\n\n"
<< " --benchmark=<str> Executes a benchmark problem size.\n";
out
<< "\n\nExamples:\n\n"
<< "$ " << "57_hopper_grouped_gemm" << " --m=1024 --n=512 --k=1024 --groups=10 --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s, std::vector<typename ProblemShape::UnderlyingProblemShape> problem_sizes_host) const
{
// Number of real-valued multiply-adds
uint64_t fmas = uint64_t();
for (auto const & problem : problem_sizes_host) {
fmas += static_cast<uint64_t>(get<0>(problem)) *
static_cast<uint64_t>(get<1>(problem)) *
static_cast<uint64_t>(get<2>(problem));
}
// Two flops per multiply-add
uint64_t flop = uint64_t(2) * uint64_t(fmas);
double gflop = double(flop) / double(1.0e9);
return gflop / runtime_s;
}
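  // Worked example (added note): a single 256x512x128 group contributes
  // 2 * 256 * 512 * 128 = 33,554,432 flops (~0.0336 GFLOP); at 1 ms of
  // runtime that group alone would therefore report roughly 33.6 GFLOP/s.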
};
/// Result structure
struct Result
{
double avg_runtime_ms = 0.0;
double gflops = 0.0;
cutlass::Status status = cutlass::Status::kSuccess;
cudaError_t error = cudaSuccess;
bool passed = false;
};
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM setup and evaluation
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to initialize a block of device data
template <class Element>
bool initialize_block(
cutlass::DeviceAllocation<Element>& block,
uint64_t seed=2023) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
if (bits_input == 1) {
scope_max = static_cast<Element>(2);
scope_min = static_cast<Element>(0);
} else if (bits_input <= 8) {
scope_max = static_cast<Element>(2);
scope_min = static_cast<Element>(-2);
} else {
scope_max = static_cast<Element>(8);
scope_min = static_cast<Element>(-8);
}
cutlass::reference::device::BlockFillRandomUniform(
block.get(), block.size(), seed, scope_max, scope_min, 0);
return true;
}
/// Allocates device-side data
void allocate(const Options &options) {
int64_t total_elements_A = 0;
int64_t total_elements_B = 0;
int64_t total_elements_C = 0;
int64_t total_elements_D = 0;
for (int32_t i = 0; i < options.groups; ++i) {
auto problem = options.problem_sizes_host.at(i);
auto M = get<0>(problem);
auto N = get<1>(problem);
auto K = get<2>(problem);
offset_A.push_back(total_elements_A);
offset_B.push_back(total_elements_B);
offset_C.push_back(total_elements_C);
offset_D.push_back(total_elements_D);
int64_t elements_A = M * K;
int64_t elements_B = K * N;
int64_t elements_C = M * N;
int64_t elements_D = M * N;
total_elements_A += elements_A;
total_elements_B += elements_B;
total_elements_C += elements_C;
total_elements_D += elements_D;
stride_A_host.push_back(cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, Int<1>{})));
stride_B_host.push_back(cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, Int<1>{})));
stride_C_host.push_back(cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, Int<1>{})));
stride_D_host.push_back(cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, Int<1>{})));
}
block_A.reset(total_elements_A);
block_B.reset(total_elements_B);
block_C.reset(total_elements_C);
block_D.reset(total_elements_D);
block_ref_D.reset(total_elements_D);
block_alpha.reset(options.groups);
block_beta.reset(options.groups);
}
/// Initialize operands to be used in the GEMM and reference GEMM
void initialize(const Options &options) {
uint64_t seed = 2020;
problem_sizes.reset(options.groups);
problem_sizes.copy_from_host(options.problem_sizes_host.data());
//
// Assign pointers
//
std::vector<ElementA *> ptr_A_host(options.groups);
std::vector<ElementB *> ptr_B_host(options.groups);
std::vector<ElementC *> ptr_C_host(options.groups);
std::vector<ElementC *> ptr_D_host(options.groups);
std::vector<ElementAccumulator *> ptr_alpha_host(options.groups);
std::vector<ElementAccumulator *> ptr_beta_host(options.groups);
for (int32_t i = 0; i < options.groups; ++i) {
ptr_A_host.at(i) = block_A.get() + offset_A.at(i);
ptr_B_host.at(i) = block_B.get() + offset_B.at(i);
ptr_C_host.at(i) = block_C.get() + offset_C.at(i);
ptr_D_host.at(i) = block_D.get() + offset_D.at(i);
alpha_host.push_back((options.alpha == FLT_MAX) ? static_cast<ElementAccumulator>((rand() % 5) + 1) : options.alpha);
beta_host.push_back((options.beta == FLT_MAX) ? static_cast<ElementAccumulator>(rand() % 5) : options.beta);
ptr_alpha_host.at(i) = block_alpha.get() + i;
ptr_beta_host.at(i) = block_beta.get() + i;
}
ptr_A.reset(options.groups);
ptr_A.copy_from_host(ptr_A_host.data());
ptr_B.reset(options.groups);
ptr_B.copy_from_host(ptr_B_host.data());
ptr_C.reset(options.groups);
ptr_C.copy_from_host(ptr_C_host.data());
ptr_D.reset(options.groups);
ptr_D.copy_from_host(ptr_D_host.data());
stride_A.reset(options.groups);
stride_A.copy_from_host(stride_A_host.data());
stride_B.reset(options.groups);
stride_B.copy_from_host(stride_B_host.data());
stride_C.reset(options.groups);
stride_C.copy_from_host(stride_C_host.data());
stride_D.reset(options.groups);
stride_D.copy_from_host(stride_D_host.data());
alpha_device.reset(options.groups);
alpha_device.copy_from_host(ptr_alpha_host.data());
beta_device.reset(options.groups);
beta_device.copy_from_host(ptr_beta_host.data());
initialize_block(block_A, seed + 2023);
initialize_block(block_B, seed + 2022);
initialize_block(block_C, seed + 2021);
block_alpha.copy_from_host(alpha_host.data());
block_beta.copy_from_host(beta_host.data());
}
/// Populates a Gemm::Arguments structure from the given commandline options
typename Gemm::Arguments args_from_options(const Options &options, bool host_problem_shapes_available = true)
{
cutlass::KernelHardwareInfo hw_info;
// Change device_id to another value if you are running on a machine with multiple GPUs and wish
// to use a GPU other than that with device ID 0.
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
typename Gemm::EpilogueOutputOp::Params params;
if (options.alpha != FLT_MAX && options.beta != FLT_MAX) {
// If both alpha/beta are provided (via cmd line args) and are scalar, i.e., same alpha/beta applies to all batches.
params = typename Gemm::EpilogueOutputOp::Params(
ElementAccumulator(options.alpha), ElementAccumulator(options.beta));
}
else {
// If pointers to alpha/beta are provided, i.e., alpha/beta can differ between batches/groups.
params = typename Gemm::EpilogueOutputOp::Params(alpha_device.get(), beta_device.get());
}
typename Gemm::Arguments arguments;
if (host_problem_shapes_available) {
arguments = typename Gemm::Arguments {
cutlass::gemm::GemmUniversalMode::kGrouped,
{options.groups, problem_sizes.get(), options.problem_sizes_host.data()},
{ptr_A.get(), stride_A.get(), ptr_B.get(), stride_B.get()},
{params, ptr_C.get(), stride_C.get(), ptr_D.get(), stride_D.get()},
hw_info
};
}
else {
arguments = typename Gemm::Arguments {
cutlass::gemm::GemmUniversalMode::kGrouped,
{options.groups, problem_sizes.get(), nullptr},
{ptr_A.get(), stride_A.get(), ptr_B.get(), stride_B.get()},
{params, ptr_C.get(), stride_C.get(), ptr_D.get(), stride_D.get()},
hw_info
};
}
return arguments;
}
bool verify(const Options &options) {
bool passed = true;
for (int32_t i = 0; i < options.groups; ++i) {
auto problem = options.problem_sizes_host.at(i);
auto M = get<0>(problem);
auto N = get<1>(problem);
auto K = get<2>(problem);
cutlass::TensorRef ref_A(block_A.get() + offset_A.at(i), Gemm::LayoutA::packed({M, K}));
cutlass::TensorRef ref_B(block_B.get() + offset_B.at(i), Gemm::LayoutB::packed({K, N}));
cutlass::TensorRef ref_C(block_C.get() + offset_C.at(i), Gemm::LayoutC::packed({M, N}));
cutlass::TensorRef ref_D(block_ref_D.get() + offset_D.at(i), Gemm::LayoutD::packed({M, N}));
//
// Compute reference output
//
// Create instantiation for device reference gemm kernel
DeviceGemmReference gemm_reference;
// Launch device reference gemm kernel
gemm_reference(
{M, N, K},
ElementAccumulator(alpha_host.at(i)),
ref_A,
ref_B,
ElementAccumulator(beta_host.at(i)),
ref_C,
ref_D);
// Wait for kernel to finish
CUDA_CHECK(cudaDeviceSynchronize());
// Check if output from CUTLASS kernel and reference kernel are equal or not
passed &= cutlass::reference::device::BlockCompareEqual(block_ref_D.get() + offset_D.at(i), block_D.get() + offset_D.at(i), M * N);
#if 0
std::cout << "Group: " << i << " Status: " << passed << std::endl;
#endif
}
return passed;
}
/// Execute a given example GEMM computation
template <typename Gemm>
int run(Options &options, bool host_problem_shapes_available = true)
{
allocate(options);
initialize(options);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm;
// Create a structure of gemm kernel arguments suitable for invoking an instance of Gemm
auto arguments = args_from_options(options, host_problem_shapes_available);
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check if the problem size is supported or not
CUTLASS_CHECK(gemm.can_implement(arguments));
// Initialize CUTLASS kernel with arguments and workspace pointer
CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));
// Correctness / Warmup iteration
CUTLASS_CHECK(gemm.run());
// Check if output from CUTLASS kernel and reference kernel are equal or not
Result result;
result.passed = verify(options);
std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << std::endl;
if (!result.passed) {
exit(-1);
}
// Run profiling loop
if (options.iterations > 0)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));
CUTLASS_CHECK(gemm.run());
}
timer.stop();
// Compute average setup and runtime and GFLOPs.
float elapsed_ms = timer.elapsed_millis();
result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations);
result.gflops = options.gflops(result.avg_runtime_ms / 1000.0, options.problem_sizes_host);
std::cout << " Problem Sizes, Alpha, Beta " << std::endl;
for (int32_t i = 0; i < options.groups; ++i) {
std::cout << " " << options.problem_sizes_host.at(i);
std::cout << ", " << alpha_host.at(i) << ", " << beta_host.at(i) << std::endl;
}
std::cout << " Groups : " << options.groups << std::endl;
std::cout << " Avg runtime : " << result.avg_runtime_ms << " ms" << std::endl;
std::cout << " GFLOPS : " << result.gflops << std::endl;
}
return 0;
}
#endif // defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
// CUTLASS must be compiled with CUDA 12.3 Toolkit to run this example
if (__CUDACC_VER_MAJOR__ < 12 || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ < 3)) {
std::cerr << "This example requires CUDA 12.3 or newer.\n";
    // Returning zero so this test passes on older Toolkits. Its actions are a no-op.
return 0;
}
  cudaDeviceProp props;
  int current_device_id;
  CUDA_CHECK(cudaGetDevice(&current_device_id));
  CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id));
if (props.major < 9) {
std::cerr
<< "This example requires a GPU of NVIDIA's Hopper Architecture or "
<< "later (compute capability 90 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
//
// Evaluate CUTLASS kernels
//
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
run<Gemm>(options);
run<Gemm>(options, false /*host_problem_shapes_available*/);
#endif
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/57_hopper_grouped_gemm/57_hopper_grouped_gemm.cu/0 | {
"file_path": "examples/57_hopper_grouped_gemm/57_hopper_grouped_gemm.cu",
"repo_id": "examples",
"token_count": 10024
} | 11 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "cutlass/util/helper_cuda.hpp"
template <class ProblemShape, class CtaTiler,
class TA, class AStride, class ASmemLayout, class TiledCopyA,
class TB, class BStride, class BSmemLayout, class TiledCopyB,
class TC, class CStride, class CSmemLayout, class TiledMma,
class Alpha, class Beta>
__global__ static
__launch_bounds__(decltype(size(TiledMma{}))::value)
void
gemm_device(ProblemShape shape_MNK, CtaTiler cta_tiler,
TA const* A, AStride dA, ASmemLayout sA_layout, TiledCopyA copy_a,
TB const* B, BStride dB, BSmemLayout sB_layout, TiledCopyB copy_b,
TC * C, CStride dC, CSmemLayout , TiledMma mma,
Alpha alpha, Beta beta)
{
using namespace cute;
// Preconditions
CUTE_STATIC_ASSERT_V(rank(shape_MNK) == Int<3>{}); // (M, N, K)
CUTE_STATIC_ASSERT_V(rank(cta_tiler) == Int<3>{}); // (BLK_M, BLK_N, BLK_K)
CUTE_STATIC_ASSERT_V(size(copy_a) == size(mma)); // NumThreads
CUTE_STATIC_ASSERT_V(size(copy_b) == size(mma)); // NumThreads
static_assert(is_static<ASmemLayout>::value);
static_assert(is_static<BSmemLayout>::value);
static_assert(is_static<CSmemLayout>::value);
CUTE_STATIC_ASSERT_V(size<0>(ASmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(CSmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(BSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(CSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(ASmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(BSmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(congruent(select<0,2>(shape_MNK), dA)); // dA strides for shape MK
CUTE_STATIC_ASSERT_V(congruent(select<1,2>(shape_MNK), dB)); // dB strides for shape NK
CUTE_STATIC_ASSERT_V(congruent(select<0,1>(shape_MNK), dC)); // dC strides for shape MN
//
// Full and Tiled Tensors
//
// Represent the full tensors
Tensor mA = make_tensor(make_gmem_ptr(A), select<0,2>(shape_MNK), dA); // (M,K)
Tensor mB = make_tensor(make_gmem_ptr(B), select<1,2>(shape_MNK), dB); // (N,K)
Tensor mC = make_tensor(make_gmem_ptr(C), select<0,1>(shape_MNK), dC); // (M,N)
// Get the appropriate blocks for this thread block
auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k)
Tensor gA = local_tile(mA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k)
Tensor gB = local_tile(mB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k)
Tensor gC = local_tile(mC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N)
// Shared memory buffers
__shared__ TA smemA[cosize_v<ASmemLayout>];
__shared__ TB smemB[cosize_v<BSmemLayout>];
Tensor sA = make_tensor(make_smem_ptr(smemA), sA_layout); // (BLK_M,BLK_K)
Tensor sB = make_tensor(make_smem_ptr(smemB), sB_layout); // (BLK_N,BLK_K)
//
// Partition the copying of A and B tiles across the threads
//
// TUTORIAL: Example of partitioning via a TiledCopy
ThrCopy thr_copy_a = copy_a.get_slice(threadIdx.x);
Tensor tAgA = thr_copy_a.partition_S(gA); // (CPY,CPY_M,CPY_K,k)
Tensor tAsA = thr_copy_a.partition_D(sA); // (CPY,CPY_M,CPY_K)
// Allocate registers same shape/layout as partitioned data
Tensor tArA = make_fragment_like(tAsA); // (CPY,CPY_M,CPY_K)
ThrCopy thr_copy_b = copy_b.get_slice(threadIdx.x);
Tensor tBgB = thr_copy_b.partition_S(gB); // (CPY,CPY_N,CPY_K,k)
Tensor tBsB = thr_copy_b.partition_D(sB); // (CPY,CPY_N,CPY_K)
// Allocate registers same shape/layout as partitioned data
Tensor tBrB = make_fragment_like(tBsB); // (CPY,CPY_N,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tAsA)); // CPY_M
CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tArA)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tAsA)); // CPY_K
CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tArA)); // CPY_K
CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBsB)); // CPY_N
CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBrB)); // CPY_N
CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBsB)); // CPY_K
CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBrB)); // CPY_K
// Copy gmem to rmem for k_tile=0
copy(copy_a, tAgA(_,_,_,0), tArA);
copy(copy_b, tBgB(_,_,_,0), tBrB);
//
// Define A/B partitioning and C accumulators
//
// TUTORIAL: Example of partitioning via a TiledMMA
ThrMMA thr_mma = mma.get_slice(threadIdx.x);
Tensor tCsA = thr_mma.partition_A(sA); // (MMA,MMA_M,MMA_K)
Tensor tCsB = thr_mma.partition_B(sB); // (MMA,MMA_N,MMA_K)
Tensor tCgC = thr_mma.partition_C(gC); // (MMA,MMA_M,MMA_N)
// Allocate the accumulators -- same size as the projected data
Tensor tCrC = thr_mma.make_fragment_C(tCgC); // (MMA,MMA_M,MMA_N)
CUTE_STATIC_ASSERT_V( shape(tCrC) == shape(tCgC)); // (MMA,MMA_M,MMA_N)
CUTE_STATIC_ASSERT_V(size<1>(tCgC) == size<1>(tCsA)); // MMA_M
CUTE_STATIC_ASSERT_V(size<2>(tCgC) == size<1>(tCsB)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // MMA_K
// Clear the accumulators
clear(tCrC);
#if 0
if(thread0()) {
print(" mA : "); print( mA); print("\n");
print(" gA : "); print( gA); print("\n");
print(" sA : "); print( sA); print("\n");
print("tAgA : "); print(tAgA); print("\n");
print("tAsA : "); print(tAsA); print("\n");
print("tArA : "); print(tArA); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mB : "); print( mB); print("\n");
print(" gB : "); print( gB); print("\n");
print(" sB : "); print( sB); print("\n");
print("tBgB : "); print(tBgB); print("\n");
print("tBsB : "); print(tBsB); print("\n");
print("tArA : "); print(tArA); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mC : "); print( mC); print("\n");
print(" gC : "); print( gC); print("\n");
print("tCsA : "); print(tCsA); print("\n");
print("tCsB : "); print(tCsB); print("\n");
print("tCgC : "); print(tCgC); print("\n");
print("tCrC : "); print(tCrC); print("\n");
}
#endif
#if 1
// TUTORIAL: Example of an inner loop that pipelines compute with reads
// from global memory by staging through register and shared memory.
// Data is read from global to registers, then to shared via the TiledCopy partitions
// gemm(.) operates on the shared memory directly via the TiledMMA partitions
auto K_TILE_MAX = size<3>(tAgA);
for (int k_tile = 0; k_tile < K_TILE_MAX; ++k_tile)
{
// Copy rmem to smem with tA|tB thread-partitioned tensors
__syncthreads(); // Wait for all threads to consume smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
    __syncthreads();         // Wait for all threads to finish writing smem
// Copy gmem to rmem for k_tile+1 with tA|tB thread-partitioned tensors
int k_tile_next = (k_tile + 1 < K_TILE_MAX) ? k_tile + 1 : k_tile;
copy(copy_a, tAgA(_,_,_,k_tile_next), tArA);
copy(copy_b, tBgB(_,_,_,k_tile_next), tBrB);
    // TUTORIAL: The above call to copy(copy_a, tAgA(_,_,_,k_tile_next), tArA) is equivalent to
    //   CUTE_UNROLL
    //   for (int k = 0; k < size<2>(tArA); ++k) {
    //     CUTE_UNROLL
    //     for (int m = 0; m < size<1>(tArA); ++m) {
    //       copy_a.call(tAgA(_,m,k,k_tile_next), tArA(_,m,k));
    //     }
    //   }
// Compute gemm on mma-partitioned smem
gemm(mma, tCsA, tCsB, tCrC);
    // TUTORIAL: The above call to gemm(mma, tCsA, tCsB, tCrC) is equivalent to
    //   CUTE_UNROLL
    //   for (int k = 0; k < size<2>(tCsA); ++k) {
    //     CUTE_UNROLL
    //     for (int m = 0; m < size<1>(tCrC); ++m) {
    //       CUTE_UNROLL
    //       for (int n = 0; n < size<2>(tCrC); ++n) {
    //         mma.call(tCsA(_,m,k), tCsB(_,n,k), tCrC(_,m,n));
    //       }
    //     }
    //   }
}
#endif
//
// Epilogue
//
axpby(alpha, tCrC, beta, tCgC);
}
// Setup params for a NT GEMM
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_nt(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
using namespace cute;
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define NT strides (mixed)
auto dA = make_stride(Int<1>{}, ldA); // (dM, dK)
auto dB = make_stride(Int<1>{}, ldB); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 8>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
// Define the smem layouts (static)
auto sA = make_layout(make_shape(bM, bK)); // (m,k) -> smem_idx; m-major
auto sB = make_layout(make_shape(bN, bK)); // (n,k) -> smem_idx; n-major
auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx; m-major
// Define the thread layouts (static)
// TUTORIAL: Construct TiledCopy with a particular Copy_Atom to use and
// define the partitioning pattern to apply.
// Each thread will (try to) copy 4x1 elements of type TA using 128-bit copy.
// Use 32x8 of these threads.
TiledCopy copyA = make_tiled_copy(Copy_Atom<UniversalCopy<uint128_t>, TA>{},
Layout<Shape<_32,_8>>{}, // Thr layout 32x8 m-major
Layout<Shape< _4,_1>>{}); // Val layout 4x1 m-major
TiledCopy copyB = make_tiled_copy(Copy_Atom<UniversalCopy<uint128_t>, TB>{},
Layout<Shape<_32,_8>>{}, // Thr layout 32x8 n-major
Layout<Shape< _4,_1>>{}); // Val layout 4x1 n-major
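  // Added size check: 32x8 threads, each moving a 4x1 vector of values, cover
  // a (32*4) x (8*1) = 128x8 block per copy -- exactly one (BLK_M, BLK_K) tile
  // of A (and, likewise, one (BLK_N, BLK_K) = 128x8 tile of B).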
// TUTORIAL: Construct TiledMMA with a particular MMA_Atom to use and
// define the partitioning pattern to apply.
// Use a 1x1x1 FMA on the types TC += TA * TB. Each atom requires a single thread.
// Reproduce that atom 16x16x1 times (m-major) across threads so that we use 256 threads.
TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{},
Layout<Shape<_16,_16,_1>>{}); // 16x16x1 UniversalFMA
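  // Added size check: 16x16x1 single-thread FMA atoms give 16*16 = 256 threads;
  // each thread accumulates a (128/16) x (128/16) = 8x8 sub-tile of the 128x128
  // C block in registers.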
#if 0
print(copyA);
print(copyB);
print(mmaC);
#endif
#if 0
print_latex(copyA);
print_latex(copyB);
print_latex(mmaC);
#endif
dim3 dimBlock(size(mmaC));
dim3 dimGrid(size(ceil_div(M, bM)),
size(ceil_div(N, bN)));
gemm_device<<<dimGrid, dimBlock, 0, stream>>>
(prob_shape, cta_tiler,
A, dA, sA, copyA,
B, dB, sB, copyB,
C, dC, sC, mmaC,
alpha, beta);
}
// Setup params for a TN GEMM
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_tn(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
using namespace cute;
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define TN strides (mixed)
auto dA = make_stride(ldA, Int<1>{}); // (dM, dK)
auto dB = make_stride(ldB, Int<1>{}); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 8>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
// Define the smem layouts (static)
auto sA = make_layout(make_shape ( bM, bK),
make_stride(Int<1>{}, bM+Int<1>{})); // (m,k) -> smem_idx; padded m-major
auto sB = make_layout(make_shape ( bN, bK),
make_stride(Int<1>{}, bN+Int<1>{})); // (n,k) -> smem_idx; padded n-major
auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx
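// TUTORIAL (note, an interpretation): the "+1" padding gives sA/sB a leading dimension of
// 129 instead of 128; this is a common trick to stagger the k-major accesses across
// shared-memory banks and avoid bank conflicts.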
// TUTORIAL: Construct TiledCopy to define the Copy_Atom to use and the
// partitioning pattern to apply.
// Each thread will copy 1x1 elements of type TA.
// Use 32x8 of these threads arranged in k-major.
TiledCopy copyA = make_tiled_copy(Copy_Atom<UniversalCopy<TA>, TA>{},
Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major
Layout<Shape< _1,_1>>{}); // Val layout 1x1
TiledCopy copyB = make_tiled_copy(Copy_Atom<UniversalCopy<TB>, TB>{},
Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major
Layout<Shape< _1,_1>>{}); // Val layout 1x1
// TUTORIAL: Construct TiledMMA to define the MMA_Atom to use and the
// partitioning pattern to apply.
// Use a 1x1x1 FMA on the types TC += TA * TB. Each atom requires a single thread.
// Reproduce that atom 16x16x1 times (m-major) across threads so that we use 256 threads.
TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{},
Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA
#if 0
print(copyA);
print(copyB);
print(mmaC);
#endif
#if 0
print_latex(copyA);
print_latex(copyB);
print_latex(mmaC);
#endif
dim3 dimBlock(size(mmaC));
dim3 dimGrid(size(ceil_div(M, bM)),
size(ceil_div(N, bN)));
gemm_device<<<dimGrid, dimBlock, 0, stream>>>
(prob_shape, cta_tiler,
A, dA, sA, copyA,
B, dB, sB, copyB,
C, dC, sC, mmaC,
alpha, beta);
}
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm(char transA, char transB, int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
if (transA == 'N' && transB == 'T') {
return gemm_nt(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
} else
if (transA == 'T' && transB == 'N') {
return gemm_tn(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
}
assert(false && "Not implemented");
}
int main(int argc, char** argv)
{
int m = 5120;
if (argc >= 2)
sscanf(argv[1], "%d", &m);
int n = 5120;
if (argc >= 3)
sscanf(argv[2], "%d", &n);
int k = 4096;
if (argc >= 4)
sscanf(argv[3], "%d", &k);
char transA = 'N';
if (argc >= 5)
sscanf(argv[4], "%c", &transA);
char transB = 'T';
if (argc >= 6)
sscanf(argv[5], "%c", &transB);
using TA = float;
using TB = float;
using TC = float;
using TI = float;
TI alpha = 1.0;
TI beta = 0.0;
std::cout << "M = " << m << std::endl;
std::cout << "N = " << n << std::endl;
std::cout << "K = " << k << std::endl;
std::cout << "C = A^" << transA << " B^" << transB << std::endl;
cute::device_init(0);
thrust::host_vector<TA> h_A(m*k);
thrust::host_vector<TB> h_B(n*k);
thrust::host_vector<TC> h_C(m*n);
for (int j = 0; j < m*k; ++j) h_A[j] = static_cast<TA>( 2*(rand() / double(RAND_MAX)) - 1 );
for (int j = 0; j < n*k; ++j) h_B[j] = static_cast<TB>( 2*(rand() / double(RAND_MAX)) - 1 );
for (int j = 0; j < m*n; ++j) h_C[j] = static_cast<TC>(-1);
thrust::device_vector<TA> d_A = h_A;
thrust::device_vector<TB> d_B = h_B;
thrust::device_vector<TC> d_C = h_C;
double gflops = (2.0*m*n*k) * 1e-9;
const int timing_iterations = 100;
GPU_Clock timer;
int ldA = 0, ldB = 0, ldC = m;
if (transA == 'N') {
ldA = m;
} else if (transA == 'T') {
ldA = k;
} else {
assert(false);
}
if (transB == 'N') {
ldB = k;
} else if (transB == 'T') {
ldB = n;
} else {
assert(false);
}
// Run once
d_C = h_C;
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
CUTE_CHECK_LAST();
thrust::host_vector<TC> cute_result = d_C;
// Timing iterations
timer.start();
for (int i = 0; i < timing_iterations; ++i) {
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
}
double cute_time = timer.seconds() / timing_iterations;
CUTE_CHECK_LAST();
printf("CUTE_GEMM: [%6.1f]GFlop/s (%6.4f)ms\n", gflops / cute_time, cute_time*1000);
return 0;
}
| examples/cute/tutorial/sgemm_2.cu/0 | {
"file_path": "examples/cute/tutorial/sgemm_2.cu",
"repo_id": "examples",
"token_count": 9366
} | 12 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/numeric/complex.hpp>
/** C++14 <functional> extensions */
namespace cute {
/**************/
/** Identity **/
/**************/
struct identity {
template <class T>
CUTE_HOST_DEVICE constexpr
decltype(auto) operator()(T&& arg) const {
return static_cast<T&&>(arg);
}
};
template <class R>
struct constant_fn {
template <class... T>
CUTE_HOST_DEVICE constexpr
decltype(auto) operator()(T&&...) const {
return r_;
}
R r_;
};
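// Example (sketch): constant_fn ignores its arguments and returns the stored value,
//   constant_fn<int> k42{42};
//   int x = k42(1, 2.5, 'c');   // x == 42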
/***********/
/** Unary **/
/***********/
#define CUTE_LEFT_UNARY_OP(NAME,OP) \
struct NAME { \
template <class T> \
CUTE_HOST_DEVICE constexpr \
decltype(auto) operator()(T&& arg) const { \
return OP static_cast<T&&>(arg); \
} \
}
#define CUTE_RIGHT_UNARY_OP(NAME,OP) \
struct NAME { \
template <class T> \
CUTE_HOST_DEVICE constexpr \
decltype(auto) operator()(T&& arg) const { \
return static_cast<T&&>(arg) OP ; \
} \
}
#define CUTE_NAMED_UNARY_OP(NAME,OP) \
struct NAME { \
template <class T> \
CUTE_HOST_DEVICE constexpr \
decltype(auto) operator()(T&& arg) const { \
return OP (static_cast<T&&>(arg)); \
} \
}
CUTE_LEFT_UNARY_OP(unary_plus, +);
CUTE_LEFT_UNARY_OP(negate, -);
CUTE_LEFT_UNARY_OP(bit_not, ~);
CUTE_LEFT_UNARY_OP(logical_not, !);
CUTE_LEFT_UNARY_OP(dereference, *);
CUTE_LEFT_UNARY_OP(address_of, &);
CUTE_LEFT_UNARY_OP(pre_increment, ++);
CUTE_LEFT_UNARY_OP(pre_decrement, --);
CUTE_RIGHT_UNARY_OP(post_increment, ++);
CUTE_RIGHT_UNARY_OP(post_decrement, --);
CUTE_NAMED_UNARY_OP(abs_fn, abs);
CUTE_NAMED_UNARY_OP(conjugate, cute::conj);
#undef CUTE_LEFT_UNARY_OP
#undef CUTE_RIGHT_UNARY_OP
#undef CUTE_NAMED_UNARY_OP
template <int Shift_>
struct shift_right_const {
static constexpr int Shift = Shift_;
template <class T>
CUTE_HOST_DEVICE constexpr
decltype(auto) operator()(T&& arg) const {
return static_cast<T&&>(arg) >> Shift;
}
};
template <int Shift_>
struct shift_left_const {
static constexpr int Shift = Shift_;
template <class T>
CUTE_HOST_DEVICE constexpr
decltype(auto) operator()(T&& arg) const {
return static_cast<T&&>(arg) << Shift;
}
};
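// Example (sketch): shift_right_const<2>{}(12) == 3 and shift_left_const<2>{}(3) == 12.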
/************/
/** Binary **/
/************/
#define CUTE_BINARY_OP(NAME,OP) \
struct NAME { \
template <class T, class U> \
CUTE_HOST_DEVICE constexpr \
decltype(auto) operator()(T&& lhs, U&& rhs) const { \
return static_cast<T&&>(lhs) OP static_cast<U&&>(rhs); \
} \
}
#define CUTE_NAMED_BINARY_OP(NAME,OP) \
struct NAME { \
template <class T, class U> \
CUTE_HOST_DEVICE constexpr \
decltype(auto) operator()(T&& lhs, U&& rhs) const { \
return OP (static_cast<T&&>(lhs), static_cast<U&&>(rhs)); \
} \
}
CUTE_BINARY_OP(plus, +);
CUTE_BINARY_OP(minus, -);
CUTE_BINARY_OP(multiplies, *);
CUTE_BINARY_OP(divides, /);
CUTE_BINARY_OP(modulus, %);
CUTE_BINARY_OP(plus_assign, +=);
CUTE_BINARY_OP(minus_assign, -=);
CUTE_BINARY_OP(multiplies_assign, *=);
CUTE_BINARY_OP(divides_assign, /=);
CUTE_BINARY_OP(modulus_assign, %=);
CUTE_BINARY_OP(bit_and, &);
CUTE_BINARY_OP(bit_or, |);
CUTE_BINARY_OP(bit_xor, ^);
CUTE_BINARY_OP(left_shift, <<);
CUTE_BINARY_OP(right_shift, >>);
CUTE_BINARY_OP(bit_and_assign, &=);
CUTE_BINARY_OP(bit_or_assign, |=);
CUTE_BINARY_OP(bit_xor_assign, ^=);
CUTE_BINARY_OP(left_shift_assign, <<=);
CUTE_BINARY_OP(right_shift_assign, >>=);
CUTE_BINARY_OP(logical_and, &&);
CUTE_BINARY_OP(logical_or, ||);
CUTE_BINARY_OP(equal_to, ==);
CUTE_BINARY_OP(not_equal_to, !=);
CUTE_BINARY_OP(greater, >);
CUTE_BINARY_OP(less, <);
CUTE_BINARY_OP(greater_equal, >=);
CUTE_BINARY_OP(less_equal, <=);
CUTE_NAMED_BINARY_OP(max_fn, cute::max);
CUTE_NAMED_BINARY_OP(min_fn, cute::min);
#undef CUTE_BINARY_OP
#undef CUTE_NAMED_BINARY_OP
/**********/
/** Fold **/
/**********/
#define CUTE_FOLD_OP(NAME,OP) \
struct NAME##_unary_rfold { \
template <class... T> \
CUTE_HOST_DEVICE constexpr \
auto operator()(T&&... t) const { \
return (t OP ...); \
} \
}; \
struct NAME##_unary_lfold { \
template <class... T> \
CUTE_HOST_DEVICE constexpr \
auto operator()(T&&... t) const { \
return (... OP t); \
} \
}; \
struct NAME##_binary_rfold { \
template <class U, class... T> \
CUTE_HOST_DEVICE constexpr \
auto operator()(U&& u, T&&... t) const { \
return (t OP ... OP u); \
} \
}; \
struct NAME##_binary_lfold { \
template <class U, class... T> \
CUTE_HOST_DEVICE constexpr \
auto operator()(U&& u, T&&... t) const { \
return (u OP ... OP t); \
} \
}
CUTE_FOLD_OP(plus, +);
CUTE_FOLD_OP(minus, -);
CUTE_FOLD_OP(multiplies, *);
CUTE_FOLD_OP(divides, /);
CUTE_FOLD_OP(modulus, %);
CUTE_FOLD_OP(plus_assign, +=);
CUTE_FOLD_OP(minus_assign, -=);
CUTE_FOLD_OP(multiplies_assign, *=);
CUTE_FOLD_OP(divides_assign, /=);
CUTE_FOLD_OP(modulus_assign, %=);
CUTE_FOLD_OP(bit_and, &);
CUTE_FOLD_OP(bit_or, |);
CUTE_FOLD_OP(bit_xor, ^);
CUTE_FOLD_OP(left_shift, <<);
CUTE_FOLD_OP(right_shift, >>);
CUTE_FOLD_OP(bit_and_assign, &=);
CUTE_FOLD_OP(bit_or_assign, |=);
CUTE_FOLD_OP(bit_xor_assign, ^=);
CUTE_FOLD_OP(left_shift_assign, <<=);
CUTE_FOLD_OP(right_shift_assign, >>=);
CUTE_FOLD_OP(logical_and, &&);
CUTE_FOLD_OP(logical_or, ||);
CUTE_FOLD_OP(equal_to, ==);
CUTE_FOLD_OP(not_equal_to, !=);
CUTE_FOLD_OP(greater, >);
CUTE_FOLD_OP(less, <);
CUTE_FOLD_OP(greater_equal, >=);
CUTE_FOLD_OP(less_equal, <=);
#undef CUTE_FOLD_OP
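// Example (sketch): the generated functors are thin wrappers over C++17 fold-expressions, e.g.
//   plus_unary_rfold{}(1, 2, 3)     // (1 + (2 + 3)) == 6
//   plus_binary_lfold{}(10, 1, 2)   // ((10 + 1) + 2) == 13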
/**********/
/** Meta **/
/**********/
template <class Fn, class Arg>
struct bound_fn {
template <class T>
CUTE_HOST_DEVICE constexpr
decltype(auto)
operator()(T&& arg) {
return fn_(arg_, static_cast<T&&>(arg));
}
Fn fn_;
Arg arg_;
};
template <class Fn, class Arg>
CUTE_HOST_DEVICE constexpr
auto
bind(Fn const& fn, Arg const& arg) {
return bound_fn<Fn,Arg>{fn, arg};
}
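// Example (sketch): bind fixes the left-hand argument of a binary functor,
//   auto add5 = cute::bind(cute::plus{}, 5);
//   int r = add5(7);   // plus{}(5, 7) == 12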
} // end namespace cute
| include/cute/algorithm/functional.hpp/0 | {
"file_path": "include/cute/algorithm/functional.hpp",
"repo_id": "include",
"token_count": 6329
} | 13 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/mma.hpp>
#include <cute/atom/mma_traits.hpp>
#include <cute/tensor.hpp>
#include <cute/util/type_traits.hpp>
namespace cute {
template <class... Args>
struct MMA_Atom;
template <class MMAOperation>
struct MMA_Atom<MMAOperation> : MMA_Atom<MMA_Traits<MMAOperation>>
{};
template <class... Args>
struct MMA_Atom<MMA_Traits<Args...>>
: MMA_Traits<Args...>
{
using Traits = MMA_Traits<Args...>;
// Element value types from the MMA_Traits
using ValTypeD = typename Traits::ValTypeD;
using ValTypeA = typename Traits::ValTypeA;
using ValTypeB = typename Traits::ValTypeB;
using ValTypeC = typename Traits::ValTypeC;
// Thr-Val layouts from the MMA_Traits
using Shape_MNK = typename Traits::Shape_MNK;
using ThrID = typename Traits::ThrID;
using LayoutC_TV = typename Traits::CLayout;
using LayoutA_TV = typename Traits::ALayout;
using LayoutB_TV = typename Traits::BLayout;
// Fragment value types from the MMA_Traits (optional, defaults to Val type)
using FrgTypeD = typename detail::FrgTypeC_or_Default<Traits>::type;
using FrgTypeA = typename detail::FrgTypeA_or_Default<Traits>::type;
using FrgTypeB = typename detail::FrgTypeB_or_Default<Traits>::type;
using FrgTypeC = typename detail::FrgTypeC_or_Default<Traits>::type;
// Additional Trait parameters/transformations
template <class... TraitsArgs>
CUTE_HOST_DEVICE
auto
with(TraitsArgs&&... args) const {
auto traits = Traits::with(static_cast<TraitsArgs&&>(args)...);
return MMA_Atom<decltype(traits)>{traits};
}
//
// Tensor call interfaces
//
// Cast, check, and call fma
template <class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE constexpr
void
call(Tensor<TD, DLayout> & D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C) const
{
static_assert(DLayout::rank == 1, "Expected rank-1 D tensor");
static_assert(ALayout::rank == 1, "Expected rank-1 A tensor");
static_assert(BLayout::rank == 1, "Expected rank-1 B tensor");
static_assert(CLayout::rank == 1, "Expected rank-1 C tensor");
return mma_unpack(*this, D, A, B, C);
}
// Three-argument overload: accumulate in place, reusing C as the D output (C = A * B + C)
template <class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE constexpr
void
call(Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> & C) const
{
return call(C, A, B, C);
}
//
// make_fragment_A|B|C
// These functions are awkward as they expect already-partitioned tensors
// resulting from a previous call to partition_A|B|C
// The reasoning is that we can inspect the layout of the partitioned data
// and attempt to match it in generated fragment to promote vectorization
// when copying from partition to fragment.
//
template <class CTensor>
CUTE_HOST_DEVICE static constexpr
auto
make_fragment_C(CTensor&& ctensor)
{
// Check that this tensor is likely already partitioned
CUTE_STATIC_ASSERT_V(rank(ctensor) >= Int<3>{}); // VMN
CUTE_STATIC_ASSERT_V(size<0>(ctensor) == size<1>(LayoutC_TV{}));
// C is a bit special because we are after accumulators here
// The input/output type doesn't have to match the accumulator type
//static_assert(std::is_same<ValTypeC, typename remove_cvref_t<CTensor>::value_type>::value, "Expecting ValTypeC type");
// We'll never base the accumulator layout on the input tensor layout, so just return a FrgTypeC tensor
return make_tensor<FrgTypeC>(shape(ctensor));
}
template <class ATensor>
CUTE_HOST_DEVICE static constexpr
auto
make_fragment_A(ATensor&& atensor)
{
// Check that this tensor is likely already partitioned
CUTE_STATIC_ASSERT_V(rank(atensor) >= Int<3>{}); // VMK
CUTE_STATIC_ASSERT_V(size<0>(atensor) == size<1>(LayoutA_TV{}));
if constexpr (has_dereference<FrgTypeA>::value) {
// If the intended FrgTypeA is a view (of the current tensor), forward the whole
static_assert(is_same<ValTypeA, typename remove_cvref_t<ATensor>::value_type>::value
, "Expecting ValTypeA type");
return make_tensor<FrgTypeA>(static_cast<ATensor&&>(atensor));
} else {
// Else, the intended FrgTypeA is a value type, construct a new tensor with a fragment layout
return make_fragment_like<FrgTypeA>(atensor);
}
CUTE_GCC_UNREACHABLE;
}
template <class BTensor>
CUTE_HOST_DEVICE static constexpr
auto
make_fragment_B(BTensor&& btensor)
{
// Check that this tensor is likely already partitioned
CUTE_STATIC_ASSERT_V(rank(btensor) >= Int<3>{}); // VNK
CUTE_STATIC_ASSERT_V(size<0>(btensor) == size<1>(LayoutB_TV{}));
if constexpr (has_dereference<FrgTypeB>::value) {
// If the intended FrgTypeB is a view (of the current tensor), forward the whole
static_assert(is_same<ValTypeB, typename remove_cvref_t<BTensor>::value_type>::value
, "Expecting ValTypeB type");
return make_tensor<FrgTypeB>(static_cast<BTensor&&>(btensor));
} else {
// Else, the intended FrgTypeB is a value type, construct a new tensor with a fragment layout
return make_fragment_like<FrgTypeB>(btensor);
}
CUTE_GCC_UNREACHABLE;
}
};
//
// A tiling of mma atoms
//
template <class TiledMMA, class ThrCoord>
struct ThrMMA;
// @tparam MMA_Atom The MMA_Atom to use in the TiledMMA
// @tparam AtomLayoutMNK The MNK-tiling of the Atom to be performed.
// @tparam PermutationMNK Permutations to apply to each MNK-mode before tiling for the Atom.
template <class MMA_Atom,
class AtomLayoutMNK,
class PermutationMNK = Tile<Underscore,Underscore,Underscore>>
struct TiledMMA : MMA_Atom
{
using Atom = MMA_Atom;
using AtomShape_MNK = typename MMA_Atom::Shape_MNK;
using AtomThrID = typename MMA_Atom::ThrID;
using AtomLayoutC_TV = typename MMA_Atom::LayoutC_TV;
using AtomLayoutA_TV = typename MMA_Atom::LayoutA_TV;
using AtomLayoutB_TV = typename MMA_Atom::LayoutB_TV;
static_assert( rank_v<AtomLayoutMNK> == 3, "TiledMMA requires rank-3 AtomLayoutMNK");
static_assert( rank_v<PermutationMNK> == 3, "TiledMMA requires rank-3 PermutationMNK");
static_assert( is_tuple<PermutationMNK>::value, "TiledMMA requires independent permutations of MNK.");
static_assert(is_static<PermutationMNK>::value, "TiledMMA requires static permutations of MNK.");
using ThrLayoutVMNK = decltype(tiled_product(AtomThrID{}, AtomLayoutMNK{}));
ThrLayoutVMNK thr_layout_vmnk_;
CUTE_HOST_DEVICE constexpr
TiledMMA(MMA_Atom const& mma_atom = {}, AtomLayoutMNK const& thr_layout_mnk = {})
: MMA_Atom(mma_atom),
thr_layout_vmnk_(tiled_product(AtomThrID{}, thr_layout_mnk)) {}
CUTE_HOST_DEVICE constexpr auto
get_thr_layout_vmnk() const {
return thr_layout_vmnk_;
}
// Tile a tensor or a layout from shape
// (M,N,...)
// to shape
// ((ThrV,(ThrM,ThrN)),(FrgV,(RestM,RestN,...)))
// where
// ThrV: The threads local to an MMA. layout<0>(ThrLayoutVMNK): ThrV -> thread_idx
// ThrM: The threads tiled in M. layout<1>(ThrLayoutVMNK): ThrM -> thread_idx
// ThrN: The threads tiled in N. layout<2>(ThrLayoutVMNK): ThrN -> thread_idx
// FrgV: The values local to an MMA.
// RestM: The values tiled in M.
// RestN: The values tiled in N.
template <class CTensor>
CUTE_HOST_DEVICE constexpr
auto
thrfrg_C(CTensor&& ctensor) const
{
CUTE_STATIC_ASSERT_V(rank(ctensor) >= Int<2>{});
//CUTE_STATIC_ASSERT_V(size<0>(ctensor) % size<0>(TiledShape_MNK{}) == Int<0>{});
//CUTE_STATIC_ASSERT_V(size<1>(ctensor) % size<1>(TiledShape_MNK{}) == Int<0>{});
// Reorder the tensor for the TiledAtom
auto t_tile = make_tile(get<0>(PermutationMNK{}),
get<1>(PermutationMNK{}));
auto t_tensor = logical_divide(ctensor, t_tile); // (PermM,PermN)
// Tile the tensor for the Atom
auto a_tile = make_tile(make_layout(size<0>(AtomShape_MNK{})),
make_layout(size<1>(AtomShape_MNK{})));
auto a_tensor = zipped_divide(t_tensor, a_tile); // ((AtomM,AtomN),(RestM,RestN))
// Transform the Atom mode from (M,K) to (Thr,Val)
auto tv_tensor = a_tensor.compose(AtomLayoutC_TV{},_); // ((ThrV,FrgV),(RestM,RestN))
// Tile the tensor for the C-threads
auto thr_tile = make_tile(_,
make_tile(make_layout(size<1>(thr_layout_vmnk_)),
make_layout(size<2>(thr_layout_vmnk_))));
auto thr_tensor = zipped_divide(tv_tensor, thr_tile); // ((ThrV,(ThrM,ThrN)),(FrgV,(RestM,RestN)))
return thr_tensor;
}
// Tile a tensor or a layout from shape
// (M,K,...)
// to shape
// ((ThrV,(ThrM,ThrK)),(FrgV,(RestM,RestK,...)))
// where
// ThrV: The threads local to an MMA. layout<0>(ThrLayoutVMNK): ThrV -> thread_idx
// ThrM: The threads tiled in M. layout<1>(ThrLayoutVMNK): ThrM -> thread_idx
// ThrK: The threads tiled in K. layout<3>(ThrLayoutVMNK): ThrK -> thread_idx
// FrgV: The values local to an MMA.
// RestM: The values tiled in M.
// RestK: The values tiled in K.
template <class ATensor>
CUTE_HOST_DEVICE constexpr
auto
thrfrg_A(ATensor&& atensor) const
{
CUTE_STATIC_ASSERT_V(rank(atensor) >= Int<2>{});
//CUTE_STATIC_ASSERT_V(size<0>(atensor) % size<0>(TiledShape_MNK{}) == Int<0>{});
//CUTE_STATIC_ASSERT_V(size<1>(atensor) % size<2>(TiledShape_MNK{}) == Int<0>{});
// Reorder the tensor for the TiledAtom
auto t_tile = make_tile(get<0>(PermutationMNK{}),
get<2>(PermutationMNK{}));
auto t_tensor = logical_divide(atensor, t_tile); // (PermM,PermK)
// Tile the tensor for the Atom
auto a_tile = make_tile(make_layout(size<0>(AtomShape_MNK{})),
make_layout(size<2>(AtomShape_MNK{})));
auto a_tensor = zipped_divide(t_tensor, a_tile); // ((AtomM,AtomK),(RestM,RestK))
// Transform the Atom mode from (M,K) to (Thr,Val)
auto tv_tensor = a_tensor.compose(AtomLayoutA_TV{},_); // ((ThrV,FrgV),(RestM,RestK))
// Tile the tensor for the Thread
auto thr_tile = make_tile(_,
make_tile(make_layout(size<1>(thr_layout_vmnk_)),
make_layout(size<3>(thr_layout_vmnk_))));
auto thr_tensor = zipped_divide(tv_tensor, thr_tile); // ((ThrV,(ThrM,ThrK)),(FrgV,(RestM,RestK)))
return thr_tensor;
}
// Tile a tensor or a layout from shape
// (N,K,...)
// to shape
// ((ThrV,(ThrN,ThrK)),(FrgV,(RestN,RestK,...)))
// where
// ThrV: The threads local to an MMA. layout<0>(ThrLayoutVMNK): ThrV -> thread_idx
// ThrN: The threads tiled in N. layout<2>(ThrLayoutVMNK): ThrN -> thread_idx
// ThrK: The threads tiled in K. layout<3>(ThrLayoutVMNK): ThrK -> thread_idx
// FrgV: The values local to an MMA.
// RestN: The values tiled in N.
// RestK: The values tiled in K.
template <class BTensor>
CUTE_HOST_DEVICE constexpr
auto
thrfrg_B(BTensor&& btensor) const
{
CUTE_STATIC_ASSERT_V(rank(btensor) >= Int<2>{});
//CUTE_STATIC_ASSERT_V(size<0>(btensor) % size<1>(TiledShape_MNK{}) == Int<0>{});
//CUTE_STATIC_ASSERT_V(size<1>(btensor) % size<2>(TiledShape_MNK{}) == Int<0>{});
// Reorder the tensor for the TiledAtom
auto t_tile = make_tile(get<1>(PermutationMNK{}),
get<2>(PermutationMNK{}));
auto t_tensor = logical_divide(btensor, t_tile); // (PermN,PermK)
// Tile the tensor for the Atom
auto b_tile = make_tile(make_layout(size<1>(AtomShape_MNK{})),
make_layout(size<2>(AtomShape_MNK{})));
auto b_tensor = zipped_divide(t_tensor, b_tile); // ((AtomN,AtomK),(RestN,RestK))
// Transform the Atom mode from (N,K) to (Thr,Val)
auto tv_tensor = b_tensor.compose(AtomLayoutB_TV{},_); // ((ThrV,FrgV),(RestN,RestK))
// Tile the tensor for the Thread
auto thr_tile = make_tile(_,
make_tile(make_layout(size<2>(thr_layout_vmnk_)),
make_layout(size<3>(thr_layout_vmnk_))));
auto thr_tensor = zipped_divide(tv_tensor, thr_tile); // ((ThrV,(ThrN,ThrK)),(FrgV,(RestN,RestK)))
return thr_tensor;
}
template <class ThrIdx,
__CUTE_REQUIRES(is_integral<ThrIdx>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_slice(ThrIdx const& thr_idx) const
{
auto thr_vmnk = thr_layout_vmnk_.get_flat_coord(thr_idx);
return ThrMMA<TiledMMA, decltype(thr_vmnk)>{*this, thr_vmnk};
}
template <class ThrIdx,
__CUTE_REQUIRES(is_integral<ThrIdx>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_thread_slice(ThrIdx const& thr_idx) const
{
return get_slice(thr_idx);
}
//
// Utility for printing and visualization
//
// The size of the MNK-mode
template <int I>
CUTE_HOST_DEVICE constexpr
auto
tile_size_mnk() const {
static_assert(0 <= I && I < 3);
auto core_size = size<I>(AtomShape_MNK{}) * size<I+1>(get_thr_layout_vmnk());
[[maybe_unused]] auto perm_size = size<I>(PermutationMNK{});
if constexpr (is_underscore<decltype(perm_size)>::value) {
return core_size;
} else {
return cute::max(core_size, perm_size);
}
CUTE_GCC_UNREACHABLE;
}
CUTE_HOST_DEVICE constexpr
auto
get_layoutC_MN() const
{
// (M,N) -> (M,N)
auto ref_C = make_layout(make_shape(tile_size_mnk<0>(), tile_size_mnk<1>()));
// (cthrid,val) -> (M,N)
auto layoutC_TV = thrfrg_C(ref_C);
// (M,N) -> (cthrid,frg)
auto layoutC_MN = right_inverse(layoutC_TV).with_shape(shape(ref_C));
// cthrid = (v,m,n) -> thr_idx
auto thrID_C = thr_layout_vmnk_(_,_,_,Int<0>{});
return cute::make_tuple(layoutC_MN, thrID_C);
}
CUTE_HOST_DEVICE constexpr
auto
get_layoutC_TV() const
{
// (M,N) -> (M,N)
auto ref_C = make_layout(make_shape(tile_size_mnk<0>(), tile_size_mnk<1>()));
// (cthrid,val) -> (M,N)
auto layoutC_TV = thrfrg_C(ref_C);
// thr_idx -> (ThrV,ThrM,ThrN,ThrK)
auto thridx_2_thrid = right_inverse(thr_layout_vmnk_);
// (thr_idx,val) -> (M,N)
return layoutC_TV.compose(thridx_2_thrid, _);
}
CUTE_HOST_DEVICE constexpr
auto
get_layoutA_MK() const
{
// (M,K) -> (M,K)
auto ref_A = make_layout(make_shape(tile_size_mnk<0>(), tile_size_mnk<2>()));
// (athrid,val) -> (M,K)
auto layoutA_TV = thrfrg_A(ref_A);
// (M,K) -> (athrid,frg)
auto layoutA_MK = right_inverse(layoutA_TV).with_shape(shape(ref_A));
// athrid = (v,m,k) -> thr_idx
auto thrID_A = thr_layout_vmnk_(_,_,Int<0>{},_);
return cute::make_tuple(layoutA_MK, thrID_A);
}
CUTE_HOST_DEVICE constexpr
auto
get_layoutA_TV() const
{
// (M,K) -> (M,K)
auto ref_A = make_layout(make_shape(tile_size_mnk<0>(), tile_size_mnk<2>()));
// (athrid,val) -> (M,K)
auto layoutA_TV = thrfrg_A(ref_A);
// (ThrV,(ThrM,ThrK)) -> (ThrV,(ThrM,ThrN,ThrK))
auto atile = make_tile(_,
make_tile(make_layout(make_shape (size<1>(thr_layout_vmnk_), size<2>(thr_layout_vmnk_)),
make_stride( Int<1>{} , Int<0>{} )),
_));
// thr_idx -> (ThrV,ThrM,ThrN,ThrK)
auto thridx_2_thrid = right_inverse(thr_layout_vmnk_);
// (thr_idx,val) -> (M,K)
return thrfrg_A(ref_A).compose(atile, _).compose(thridx_2_thrid, _);
}
CUTE_HOST_DEVICE constexpr
auto
get_layoutB_NK() const
{
// (N,K) -> (N,K)
auto ref_B = make_layout(make_shape(tile_size_mnk<1>(), tile_size_mnk<2>()));
// (bthrid,val) -> (N,K)
auto layoutB_TV = thrfrg_B(ref_B);
// (N,K) -> (bthrid,frg)
auto layoutB_NK = right_inverse(layoutB_TV).with_shape(shape(ref_B));
// bthrid = (v,n,k) -> thr_idx
auto thrID_B = thr_layout_vmnk_(_,Int<0>{},_,_);
return cute::make_tuple(layoutB_NK, thrID_B);
}
CUTE_HOST_DEVICE constexpr
auto
get_layoutB_TV() const
{
// (N,K) -> (N,K)
auto ref_B = make_layout(make_shape(tile_size_mnk<1>(), tile_size_mnk<2>()));
// (bthrid,val) -> (N,K)
auto layoutB_TV = thrfrg_B(ref_B);
// (ThrV,(ThrN,ThrK)) -> (ThrV,(ThrM,ThrN,ThrK))
auto btile = make_tile(_,
make_tile(make_layout(make_shape (size<1>(thr_layout_vmnk_), size<2>(thr_layout_vmnk_)),
make_stride( Int<0>{} , Int<1>{} )),
_));
// thr_idx -> (ThrV,ThrM,ThrN,ThrK)
auto thridx_2_thrid = right_inverse(thr_layout_vmnk_);
// (thr_idx,val) -> (N,K)
return thrfrg_B(ref_B).compose(btile, _).compose(thridx_2_thrid, _);
}
};
template <class TiledMMA, class ThrVMNK>
struct ThrMMA : TiledMMA
{
ThrVMNK thr_vmnk_;
template <class CTensor>
CUTE_HOST_DEVICE constexpr
auto
partition_C(CTensor&& ctensor) const
{
auto thr_tensor = make_tensor(static_cast<CTensor&&>(ctensor).data(), this->thrfrg_C(ctensor.layout()));
auto thr_vmn = make_coord(get<0>(thr_vmnk_), make_coord(get<1>(thr_vmnk_), get<2>(thr_vmnk_)));
return thr_tensor(thr_vmn, make_coord(_, repeat<rank<1,1>(thr_tensor)>(_)));
}
template <class ATensor>
CUTE_HOST_DEVICE constexpr
auto
partition_A(ATensor&& atensor) const
{
auto thr_tensor = make_tensor(static_cast<ATensor&&>(atensor).data(), this->thrfrg_A(atensor.layout()));
auto thr_vmk = make_coord(get<0>(thr_vmnk_), make_coord(get<1>(thr_vmnk_), get<3>(thr_vmnk_)));
return thr_tensor(thr_vmk, make_coord(_, repeat<rank<1,1>(thr_tensor)>(_)));
}
template <class BTensor>
CUTE_HOST_DEVICE constexpr
auto
partition_B(BTensor&& btensor) const
{
auto thr_tensor = make_tensor(static_cast<BTensor&&>(btensor).data(), this->thrfrg_B(btensor.layout()));
auto thr_vnk = make_coord(get<0>(thr_vmnk_), make_coord(get<2>(thr_vmnk_), get<3>(thr_vmnk_)));
return thr_tensor(thr_vnk, make_coord(_, repeat<rank<1,1>(thr_tensor)>(_)));
}
template <class CTensor>
CUTE_HOST_DEVICE constexpr
auto
partition_fragment_C(CTensor&& ctensor) const
{
return TiledMMA::make_fragment_C(partition_C(ctensor));
}
template <class ATensor>
CUTE_HOST_DEVICE constexpr
auto
partition_fragment_A(ATensor&& atensor) const
{
return TiledMMA::make_fragment_A(partition_A(atensor));
}
template <class BTensor>
CUTE_HOST_DEVICE constexpr
auto
partition_fragment_B(BTensor&& btensor) const
{
return TiledMMA::make_fragment_B(partition_B(btensor));
}
};
//
// These tile the MMA_Atom as a whole
//
template <class MMA_Op,
class MMAThrLayout = Layout<Shape<_1,_1,_1>>,
class Permutations = Tile<Underscore,Underscore,Underscore>>
CUTE_HOST_DEVICE constexpr
auto
make_tiled_mma(MMA_Atom<MMA_Op> const& mma_atom,
MMAThrLayout const& thr_layout = {},
Permutations const& permutations = {})
{
auto thr_layout_mnk = append<3>(thr_layout, Layout<_1,_0>{});
auto permutation_mnk = append<3>(permutations, _);
return TiledMMA<MMA_Atom<MMA_Op>,
decltype(thr_layout_mnk),
decltype(permutation_mnk)>{mma_atom, thr_layout_mnk};
}
template <class MMA_Op,
class MMAThrLayout = Layout<Shape<_1,_1,_1>>,
class Permutations = Tile<Underscore,Underscore,Underscore>>
CUTE_HOST_DEVICE constexpr
auto
make_tiled_mma(MMA_Op const&,
MMAThrLayout const& thr_layout = {},
Permutations const& permutations = {})
{
// Attempt to wrap in an MMA_Atom<> and forward
return make_tiled_mma(MMA_Atom<MMA_Op>{}, thr_layout, permutations);
}
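// Example (sketch, mirroring the SGEMM tutorial; the tensor gC below is an illustrative
// CTA-level tile of C, not something defined in this header):
//   TiledMMA mma = make_tiled_mma(UniversalFMA<float,float,float>{},
//                                 Layout<Shape<_16,_16,_1>>{});   // 256 threads
//   auto thr  = mma.get_slice(threadIdx.x);
//   auto tCgC = thr.partition_C(gC);            // per-thread view of the C tile
//   auto tCrC = thr.partition_fragment_C(gC);   // matching register fragment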
//
// partition_fragment_C -- static context
//
template <class... Args, class Shape_MN>
CUTE_HOST_DEVICE constexpr
auto
partition_shape_C(TiledMMA<Args...> const& mma, Shape_MN const& shape_MN)
{
constexpr int R = rank_v<Shape_MN>;
static_assert(R >= 2, "Must have at least rank-2");
auto atomMNK = typename TiledMMA<Args...>::AtomShape_MNK{};
auto thrVMNK = typename TiledMMA<Args...>::ThrLayoutVMNK{};
auto V = shape<1>(typename TiledMMA<Args...>::AtomLayoutC_TV{});
auto M = shape_div(size<0>(shape_MN), size<0>(atomMNK) * size<1>(thrVMNK));
auto N = shape_div(size<1>(shape_MN), size<1>(atomMNK) * size<2>(thrVMNK));
return cute::tuple_cat(make_shape(V,M,N), take<2,R>(shape_MN));
}
template <class... Args, class Shape_MN>
CUTE_HOST_DEVICE constexpr
auto
partition_fragment_C(TiledMMA<Args...> const& mma, Shape_MN const& shapeMN)
{
return make_tensor<typename TiledMMA<Args...>::FrgTypeC>(partition_shape_C(mma, shapeMN));
}
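// Example (worked shape, a sketch): for a TiledMMA of the single-thread FMA atom tiled
// 16x16x1 and a (BLK_M,BLK_N) = (128,128) C tile, partition_shape_C returns
// (V,M,N) = (1, 128/(1*16), 128/(1*16)) = (1,8,8), i.e. 64 accumulators per thread.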
// partition_fragment_A and partition_fragment_B often depend on the
// layout of A and B and/or the thread_idx that is requesting the partition.
// For these reasons, they should not be used in a static context.
// See TiledMMA::get_slice(thr_idx).partition_fragment_A(tensorA) instead.
template <class... Args, class Shape_MK>
CUTE_HOST_DEVICE constexpr
auto
partition_shape_A(TiledMMA<Args...> const& mma, Shape_MK const& shape_MK)
{
constexpr int R = rank_v<Shape_MK>;
static_assert(R >= 2, "Must have at least rank-2");
auto atomMNK = typename TiledMMA<Args...>::AtomShape_MNK{};
auto thrVMNK = typename TiledMMA<Args...>::ThrLayoutVMNK{};
auto V = shape<1>(typename TiledMMA<Args...>::AtomLayoutA_TV{});
auto M = shape_div(size<0>(shape_MK), size<0>(atomMNK) * size<1>(thrVMNK));
auto K = shape_div(size<1>(shape_MK), size<2>(atomMNK) * size<3>(thrVMNK));
return cute::tuple_cat(make_shape(V,M,K), take<2,R>(shape_MK));
}
template <class... Args, class Shape_NK>
CUTE_HOST_DEVICE constexpr
auto
partition_shape_B(TiledMMA<Args...> const& mma, Shape_NK const& shape_NK)
{
constexpr int R = rank_v<Shape_NK>;
static_assert(R >= 2, "Must have at least rank-2");
auto atomMNK = typename TiledMMA<Args...>::AtomShape_MNK{};
auto thrVMNK = typename TiledMMA<Args...>::ThrLayoutVMNK{};
auto V = shape<1>(typename TiledMMA<Args...>::AtomLayoutB_TV{});
auto N = shape_div(size<0>(shape_NK), size<1>(atomMNK) * size<2>(thrVMNK));
auto K = shape_div(size<1>(shape_NK), size<2>(atomMNK) * size<3>(thrVMNK));
return cute::tuple_cat(make_shape(V,N,K), take<2,R>(shape_NK));
}
//
// Size
//
template <int I, class... Args>
CUTE_HOST_DEVICE constexpr
auto
tile_size(TiledMMA<Args...> const& mma)
{
return mma.template tile_size_mnk<I>();
}
template <class... Args>
CUTE_HOST_DEVICE constexpr
auto
tile_shape(TiledMMA<Args...> const& mma)
{
return make_shape(tile_size<0>(mma), tile_size<1>(mma), tile_size<2>(mma));
}
// Deprecate?
template <int... I, class... Args>
CUTE_HOST_DEVICE constexpr
auto
size(TiledMMA<Args...> const& mma)
{
return size<I...>(mma.get_thr_layout_vmnk());
}
// Alias
template <int... I, class... Args>
CUTE_HOST_DEVICE constexpr
auto
thr_size(TiledMMA<Args...> const& mma)
{
return size<I...>(mma.get_thr_layout_vmnk());
}
//
// Display utilities
//
template <class... Args>
CUTE_HOST_DEVICE
void
print(MMA_Atom<MMA_Traits<Args...>> const&)
{
using Atom = MMA_Atom<MMA_Traits<Args...>>;
print("MMA_Atom\n");
print(" ThrID: "); print(typename Atom::ThrID{}); print("\n");
print(" Shape_MNK: "); print(typename Atom::Shape_MNK{}); print("\n");
print(" LayoutA_TV: "); print(typename Atom::LayoutA_TV{}); print("\n");
print(" LayoutB_TV: "); print(typename Atom::LayoutB_TV{}); print("\n");
print(" LayoutC_TV: "); print(typename Atom::LayoutC_TV{}); print("\n");
}
template <class Atom, class TiledThr, class TiledPerm>
CUTE_HOST_DEVICE
void
print(TiledMMA<Atom, TiledThr, TiledPerm> const& mma)
{
print("TiledMMA\n");
print(" ThrLayoutVMNK: "); print(mma.get_thr_layout_vmnk()); print("\n");
print(" PermutationMNK: "); print(TiledPerm{}); print("\n");
print(static_cast<Atom const&>(mma));
}
template <class TiledMMA, class ThrVMNK>
CUTE_HOST_DEVICE
void
print(ThrMMA<TiledMMA, ThrVMNK> const& thr_mma)
{
print("ThrMMA\n");
print(" Thr VMNK: "); print(thr_mma.thr_vmnk_); print("\n");
print(static_cast<TiledMMA>(thr_mma));
}
template <class... Args>
CUTE_HOST_DEVICE
void
print_latex(MMA_Atom<Args...> const& mma_atom)
{
print_latex(make_tiled_mma(mma_atom));
}
template <class... Args>
CUTE_HOST_DEVICE
void
print_latex(TiledMMA<Args...> const& mma)
{
auto layout_and_thrid_C = mma.get_layoutC_MN();
auto layoutC_MN = get<0>(layout_and_thrid_C);
auto thrID_C = get<1>(layout_and_thrid_C);
auto layout_and_thrid_A = mma.get_layoutA_MK();
auto layoutA_MK = get<0>(layout_and_thrid_A);
auto thrID_A = get<1>(layout_and_thrid_A);
auto layout_and_thrid_B = mma.get_layoutB_NK();
auto layoutB_NK = get<0>(layout_and_thrid_B);
auto thrID_B = get<1>(layout_and_thrid_B);
print_latex_mma(layoutC_MN, thrID_C,
layoutA_MK, thrID_A,
layoutB_NK, thrID_B);
}
// MNK MMA Layout to console printer
template <class LayoutC, class ThrIDC,
class LayoutA, class ThrIDA,
class LayoutB, class ThrIDB>
CUTE_HOST_DEVICE
void
print_layout_mma(LayoutC const& C, ThrIDC const& TC, // (m,n) -> (tid,vid) and tid -> thr_idx
LayoutA const& A, ThrIDA const& TA, // (m,k) -> (tid,vid) and tid -> thr_idx
LayoutB const& B, ThrIDB const& TB) // (n,k) -> (tid,vid) and tid -> thr_idx
{
CUTE_STATIC_ASSERT_V(rank(C) == Int<2>{});
CUTE_STATIC_ASSERT_V(rank(A) == Int<2>{});
CUTE_STATIC_ASSERT_V(rank(B) == Int<2>{});
assert(size<0>(A) == size<0>(C));
assert(size<0>(B) == size<1>(C));
assert(size<1>(A) == size<1>(B));
int a_width = size<1>(A) * 6 + 4;
// Print out B (white-shifted) k-by-n
for (int k = 0; k < size<1>(B); ++k) {
// Header
printf("%*s", a_width, "");
for (int n = 0; n < size<0>(B); ++n) printf("+-----");
printf("+\n");
// Values
printf("%*s", a_width, "");
for (int n = 0; n < size<0>(B); ++n) printf("|T%02dV%1d", int(TB(B(n,k) % size(TB))), int(B(n,k) / size(TB)));
printf("|\n");
}
// Footer
printf("%*s", a_width, "");
for (int n = 0; n < size<0>(B); ++n) printf("+-----");
printf("+\n\n");
// Print out A m-by-k and C m-by-n
for (int m = 0; m < size<0>(A); ++m) {
// Header
for (int k = 0; k < size<1>(A); ++k) printf("+-----");
printf("+ ");
for (int n = 0; n < size<1>(C); ++n) printf("+-----");
printf("+\n");
// Values
for (int k = 0; k < size<1>(A); ++k) printf("|T%02dV%1d", int(TA(A(m,k) % size(TA))), int(A(m,k) / size(TA)));
printf("| ");
for (int n = 0; n < size<1>(C); ++n) printf("|T%02dV%1d", int(TC(C(m,n) % size(TC))), int(C(m,n) / size(TC)));
printf("|\n");
}
// Footer
for (int k = 0; k < size<1>(A); ++k) printf("+-----");
printf("+ ");
for (int n = 0; n < size<1>(C); ++n) printf("+-----");
printf("+\n");
}
// MNK MMA Layout to Latex TIKZ -- 8-value color coded by thread
template <class LayoutC, class ThrIDC,
class LayoutA, class ThrIDA,
class LayoutB, class ThrIDB>
CUTE_HOST_DEVICE
void
print_latex_mma(LayoutC const& C, ThrIDC const& TC, // (m,n) -> (tid,vid) and tid -> thr_idx
LayoutA const& A, ThrIDA const& TA, // (m,k) -> (tid,vid) and tid -> thr_idx
LayoutB const& B, ThrIDB const& TB) // (n,k) -> (tid,vid) and tid -> thr_idx
{
CUTE_STATIC_ASSERT_V(rank(C) == Int<2>{});
CUTE_STATIC_ASSERT_V(rank(A) == Int<2>{});
CUTE_STATIC_ASSERT_V(rank(B) == Int<2>{});
assert(size<0>(A) == size<0>(C));
assert(size<0>(B) == size<1>(C));
assert(size<1>(A) == size<1>(B));
char const* latex_header =
"\\documentclass{standalone}\n"
"\\usepackage{tikz}\n"
"\\usetikzlibrary{external}\n"
"\\tikzexternalize\n"
"\\begin{document}\n"
"\\begin{tikzpicture}[x={(0cm,-1cm)},y={(1cm,0cm)},box/.style={rectangle,draw=black,thick,minimum size=1cm,anchor=center}]\n\n";
char const* latex_footer =
"\\end{tikzpicture}\n"
"\\end{document}\n";
char const* color_map[8] = {"{rgb,255:red,175;green,175;blue,255}",
"{rgb,255:red,175;green,255;blue,175}",
"{rgb,255:red,255;green,255;blue,175}",
"{rgb,255:red,255;green,175;blue,175}",
"{rgb,255:red,210;green,210;blue,255}",
"{rgb,255:red,210;green,255;blue,210}",
"{rgb,255:red,255;green,255;blue,210}",
"{rgb,255:red,255;green,210;blue,210}"};
// Header
printf("%% LayoutC: "); print(C); printf("\n");
printf("%% ThrIDC : "); print(TC); printf("\n");
printf("%% LayoutA: "); print(A); printf("\n");
printf("%% ThrIDA : "); print(TA); printf("\n");
printf("%% LayoutB: "); print(B); printf("\n");
printf("%% ThrIDB : "); print(TB); printf("\n\n");
printf(latex_header);
// C starting at 0,0
for (int m = 0; m < size<0>(C); ++m) {
for (int n = 0; n < size<1>(C); ++n) {
int thrid = C(m,n) % size(TC);
int val_idx = C(m,n) / size(TC);
int thr_idx = TC(thrid);
printf("\\node[box,fill=%s] at (%d,%d) {\\shortstack{T%d \\\\ V%d}};\n",
color_map[thr_idx % 8],
m, n,
thr_idx, val_idx);
}
}
// A starting at 0,-size<1>(A)-1
for (int m = 0; m < size<0>(A); ++m) {
for (int k = 0; k < size<1>(A); ++k) {
int thrid = A(m,k) % size(TA);
int val_idx = A(m,k) / size(TA);
int thr_idx = TA(thrid);
printf("\\node[box,fill=%s] at (%d,%d) {\\shortstack{T%d \\\\ V%d}};\n",
color_map[thr_idx % 8],
m, k-1-size<1>(A),
thr_idx, val_idx);
}
}
// B starting at -size<1>(B)-1,0
for (int n = 0; n < size<0>(B); ++n) {
for (int k = 0; k < size<1>(B); ++k) {
int thrid = B(n,k) % size(TB);
int val_idx = B(n,k) / size(TB);
int thr_idx = TB(thrid);
printf("\\node[box,fill=%s] at (%d,%d) {\\shortstack{T%d \\\\ V%d}};\n",
color_map[thr_idx % 8],
k-1-size<1>(B), n,
thr_idx, val_idx);
}
}
// A labels
for (int m = 0, k = -1; m < size<0>(A); ++m) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", m, k-1-size<1>(A), m);
}
for (int k = 0, m = -1; k < size<1>(A); ++k) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", m, k-1-size<1>(A), k);
}
// B labels
for (int n = 0, k = -1; n < size<0>(B); ++n) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", k-1-size<1>(B), n, n);
}
for (int k = 0, n = -1; k < size<1>(B); ++k) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", k-1-size<1>(B), n, k);
}
// Footer
printf(latex_footer);
}
} // namespace cute
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cute/atom/mma_traits_sm61.hpp>
#include <cute/atom/mma_traits_sm70.hpp>
#include <cute/atom/mma_traits_sm75.hpp>
#include <cute/atom/mma_traits_sm80.hpp>
#include <cute/atom/mma_traits_sm90.hpp>
#include <cute/atom/mma_traits_sm90_gmma.hpp>
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cute/atom/mma_atom.hpp/0 | {
"file_path": "include/cute/atom/mma_atom.hpp",
"repo_id": "include",
"token_count": 15234
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp> // iterator_traits
#include <cute/container/array_subbyte.hpp>
#include <cute/pointer_base.hpp>
#include <cute/swizzle.hpp>
/* This implements a swizzle pointer of the form
* InvolutionFn o PtrAdd
* where the InvolutionFn need not be linear.
*
* This differs subtly from swizzle_layout because the smem pointer is used
* as the offset. That means that swizzle_layout will implement position-independent
* swizzle layouts, while swizzle_ptr implements position-dependent swizzle tensors.
* Arch chose to design hardware with position-dependent swizzles.
*
* For clarity:
* NormalLayout : DeRef <- PtrAdd <- [Layout]
* ComposedLayout: DeRef <- PtrAdd <- [Swizzle <- OffsetAdd <- Layout]
* SwizzlePtr : [DeRef <- Swizzle <- PtrAdd] <- Layout
*
* Furthermore, for known swizzles, this pointer attempts to decay itself
* to a normal-pointer with a new layout containing dynamic or static strides.
* This is possible by determining the subdomain of the InvolutionFn
* that is identity and testing if the Layout's codomain is contained
* within it.
*/
namespace cute
{
// concept SwizzleFn {
// CUTE_HOST_DEVICE constexpr static uint apply(uint);
// }
// See Swizzle<B,M,S> in swizzle.hpp for common swizzle-functions.
template <class SwizzleFn, class Iterator>
struct swizzle_ptr : iter_adaptor<Iterator,swizzle_ptr<SwizzleFn,Iterator>>
{
using iterator = Iterator;
using reference = typename iterator_traits<iterator>::reference;
using element_type = typename iterator_traits<iterator>::element_type;
using value_type = typename iterator_traits<iterator>::value_type;
using iter_adaptor<Iterator,swizzle_ptr<SwizzleFn,Iterator>>::iter_adaptor;
template <class Iter>
CUTE_HOST_DEVICE constexpr static
Iter apply_swizzle(Iter ptr) {
return {apply_swizzle(ptr.get())};
}
template <class T>
CUTE_HOST_DEVICE constexpr static
T* apply_swizzle(T* ptr) {
return reinterpret_cast<T*>(SwizzleFn::apply(reinterpret_cast<uintptr_t>(ptr)));
}
template <class T>
CUTE_HOST_DEVICE constexpr static
subbyte_iterator<T> apply_swizzle(subbyte_iterator<T> ptr) {
return {apply_swizzle(ptr.ptr_), ptr.idx_};
}
CUTE_HOST_DEVICE constexpr
reference operator*() const {
return *apply_swizzle(this->get());
}
template <class Int>
CUTE_HOST_DEVICE constexpr
reference operator[](Int const& i) const {
return *apply_swizzle(this->get() + i);
}
};
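// Example (sketch; smem_float_ptr is an illustrative smem-backed float*): dereference applies
// the swizzle to the address *after* the offset add,
//   swizzle_ptr<Swizzle<3,3,3>, float*> p{smem_float_ptr};
//   float x = p[i];   // reads from Swizzle<3,3,3>::apply(uintptr_t(smem_float_ptr + i))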
template <class T, class = void> // Default No-Swizzle
struct get_swizzle { using type = Swizzle<0,4,3>; };
template <class SwizzleFn, class P> // Found the SwizzleFn
struct get_swizzle<swizzle_ptr<SwizzleFn,P>> { using type = SwizzleFn; };
template <class T> // Recurse into anything with a ::iterator
struct get_swizzle<T, void_t<typename T::iterator>> : get_swizzle<typename T::iterator> {};
template <class Iter>
using get_swizzle_t = typename get_swizzle<Iter>::type;
template <class Iterator, class SwizzleFn>
CUTE_HOST_DEVICE constexpr
swizzle_ptr<SwizzleFn,Iterator>
make_swizzle_ptr(Iterator ptr, SwizzleFn) {
return {ptr};
}
// Swizzle-0 specialization for immediate decay
template <class Iterator, int M, int S>
CUTE_HOST_DEVICE constexpr
Iterator
make_swizzle_ptr(Iterator ptr, Swizzle<0,M,S>) {
return ptr;
}
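// Example (sketch): a zero-bit swizzle is the identity function, so the wrapper decays away,
//   auto p = make_swizzle_ptr(ptr, Swizzle<0,4,3>{});   // p is just `ptr`, unwrapped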
//
// Recast
//
template <class SwizzleFn, class P>
CUTE_HOST_DEVICE constexpr
auto
raw_pointer_cast(swizzle_ptr<SwizzleFn,P> const& ptr) {
return raw_pointer_cast(ptr.get());
}
// SwizzleFn operates on the pointer address, so it doesn't care about the type
template <class NewT, class SwizzleFn, class P>
CUTE_HOST_DEVICE constexpr
auto
recast_ptr(swizzle_ptr<SwizzleFn,P> const& ptr) {
return make_swizzle_ptr(recast_ptr<NewT>(ptr.get()), SwizzleFn{});
}
//
// Display utilities
//
template <class SwizzleFn, class P>
CUTE_HOST_DEVICE void print(swizzle_ptr<SwizzleFn,P> ptr)
{
print(SwizzleFn{}); printf("_"); print(ptr.get());
}
#if !defined(__CUDACC_RTC__)
template <class SwizzleFn, class P>
CUTE_HOST std::ostream& operator<<(std::ostream& os, swizzle_ptr<SwizzleFn,P> ptr)
{
return os << SwizzleFn{} << "_" << ptr.get();
}
#endif
} // end namespace cute
| include/cute/pointer_swizzle.hpp/0 | {
"file_path": "include/cute/pointer_swizzle.hpp",
"repo_id": "include",
"token_count": 2016
} | 15 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Architecture-specific operators on memory added for SM80
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/cache_operation.h"
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
#define CUDA_CP_ASYNC_ACTIVATED 1
#else
#define CUDA_CP_ASYNC_ACTIVATED 0
#endif
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Initiates an asynchronous copy from global memory to shared memory.
///
/// cp.async
///
template <
/// Size of the access in bytes
int SizeInBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always>
struct cp_async;
/// Initiates an asynchronous copy from global memory to shared memory. Rather than predicate
/// the entire transfer, zeros are written to SMEM if the guard predicate is false.
///
/// cp.async
///
template <
/// Size of the access in bytes
int SizeInBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always>
struct cp_async_zfill;
/// Initiates an asynchronous copy from global memory to shared memory. Rather than predicate
/// the entire transfer, nans (0x7eff) are written to SMEM if the guard predicate is false.
///
/// cp.async
///
template <
/// Size of the access in bytes
int SizeInBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always>
struct cp_async_nan;
/// Either 0 or 1 is written to SMEM, based on the input element type.
/// Used for the diagonal elements of the triangular matrix in BLAS3 functions.
///
/// st.shared
///
template <
/// Type of Element
typename Element,
/// If the data is for a Hermitian matrix diagonal
bool IsHermitianData = false>
struct cp_async_diag;
static const uint32_t OOB_NAN_F16 = 0x7eff;
static const uint32_t OOB_NAN_F16x2 = ((OOB_NAN_F16 << 16) | OOB_NAN_F16);
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async<SizeInBytes, CacheOperation::Always> {
/// Copy
CUTLASS_DEVICE
cp_async(void *smem_ptr, void const *global_ptr, bool pred_guard = true) {
#if CUDA_CP_ASYNC_ACTIVATED
// Make sure the size is supported.
static_assert((SizeInBytes == 4 || SizeInBytes == 8 || SizeInBytes == 16),
"Size is not supported");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.ca.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.ca.shared.global [%1], [%2], %3;\n"
#endif
"}\n" ::"r"((int)pred_guard),
"r"(smem_int_ptr), "l"(global_ptr), "n"(SizeInBytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
#endif
}
};
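// Example (sketch; cp_async_fence()/cp_async_wait<N>() are the usual cutlass::arch
// synchronization helpers, assumed to be declared elsewhere in this header family):
//   cutlass::arch::cp_async<16>(smem_dst, gmem_src, guard);   // predicated 16B async copy
//   cutlass::arch::cp_async_fence();                          // commit the copies issued so far
//   cutlass::arch::cp_async_wait<0>();                        // wait for all committed groups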
/// Partial specialization
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async_zfill<SizeInBytes, CacheOperation::Always> {
/// Copy with zero fill
CUTLASS_DEVICE
cp_async_zfill(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if CUDA_CP_ASYNC_ACTIVATED
// Make sure the size is supported.
static_assert((SizeInBytes == 4 || SizeInBytes == 8 || SizeInBytes == 16),
"Size is not supported");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
int src_in_bytes = (pred_guard ? SizeInBytes : 0);
asm volatile(
#if CUTLASS_ENABLE_L2_PREFETCH
"cp.async.ca.shared.global.L2::128B [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#else
"cp.async.ca.shared.global [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#endif
"l"(global_ptr), "n"(SizeInBytes), "r"(src_in_bytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
else {
AccessType zeros;
zeros.clear();
*static_cast<AccessType *>(smem_ptr) = zeros;
}
#endif
}
};
/// Partial specialization
template <>
struct cp_async_nan<16, CacheOperation::Always> {
static int const kSizeInBytes = 16;
/// Copy with nan fill
CUTLASS_DEVICE
cp_async_nan(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if CUDA_CP_ASYNC_ACTIVATED
static __constant__ uint4 OOB_NAN_F16x8 = {OOB_NAN_F16x2, OOB_NAN_F16x2,
OOB_NAN_F16x2, OOB_NAN_F16x2};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.ca.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.ca.shared.global [%1], [%2], %3;\n"
#endif
" @!p st.shared.v4.u32 [%1], {%4, %5, %6, %7};\n"
"}\n"
:
: "r"((int)pred_guard), "r"(smem_int_ptr), "l"(global_ptr),
"n"(kSizeInBytes), "r"(OOB_NAN_F16x8.x), "r"(OOB_NAN_F16x8.y), "r"(OOB_NAN_F16x8.z),
"r"(OOB_NAN_F16x8.w));
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_UNUSED(global_ptr);
CUTLASS_UNUSED(pred_guard);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Partial specialization to write one (1)
template<typename Element_>
struct cp_async_diag <Element_, false> {
using Element = Element_;
CUTLASS_DEVICE
cp_async_diag(void *smem_ptr) {
#if CUDA_CP_ASYNC_ACTIVATED
/// Values for the diagonal elements of the triangular input matrix
static __constant__ uint2 DIAG_DATA_DOUBLE_ONE = {0x3ff00000, 0x00000000};
static __constant__ uint1 DIAG_DATA_FLOAT_ONE = {0x3f800000};
static __constant__ uint1 DIAG_DATA_ZERO = {0x00000000};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
if (platform::is_same<Element, complex<double>>::value) {
asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_DOUBLE_ONE.y), "r"(DIAG_DATA_DOUBLE_ONE.x),
"r"(DIAG_DATA_ZERO.x), "r"(DIAG_DATA_ZERO.x));
} else if (platform::is_same<Element, complex<float>>::value) {
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_FLOAT_ONE.x), "r"(DIAG_DATA_ZERO.x));
} else if (platform::is_same<Element, double>::value) {
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_DOUBLE_ONE.y),"r"(DIAG_DATA_DOUBLE_ONE.x));
} else if (platform::is_same<Element, float>::value) {
asm volatile("st.shared.u32 [%0], %1;\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_FLOAT_ONE.x));
} else {
CUTLASS_UNUSED(smem_int_ptr);
CUTLASS_NOT_IMPLEMENTED();
}
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Partial specialization to write zero for the imaginary part of Hermitian data
template<typename Element_>
struct cp_async_diag <Element_, true> {
using Element = Element_;
CUTLASS_DEVICE
cp_async_diag(void *smem_ptr) {
#if CUDA_CP_ASYNC_ACTIVATED
/// Values for the diagonal elements of the triangular input matrix
static __constant__ uint1 DIAG_DATA_ZERO = {0x00000000};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
if (platform::is_same<Element, complex<double>>::value) {
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_ZERO.x), "r"(DIAG_DATA_ZERO.x));
} else if (platform::is_same<Element, complex<float>>::value) {
asm volatile("st.shared.u32 [%0], %1;\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_ZERO.x));
} else {
CUTLASS_UNUSED(smem_int_ptr);
CUTLASS_NOT_IMPLEMENTED();
}
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for CacheOperation::Global
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async<SizeInBytes, CacheOperation::Global> {
/// Copy
CUTLASS_DEVICE
cp_async(void *smem_ptr, void const *global_ptr, bool pred_guard = true) {
#if CUDA_CP_ASYNC_ACTIVATED
static_assert(SizeInBytes == 16,
"cp.async only supports CacheOperation::Global when access size is 16B.");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.cg.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.cg.shared.global [%1], [%2], %3;\n"
#endif
"}\n" ::"r"((int)pred_guard),
"r"(smem_int_ptr), "l"(global_ptr), "n"(SizeInBytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
#endif
}
};
/// Partial specialization for CacheOperation::Global
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async_zfill<SizeInBytes, CacheOperation::Global> {
/// Copy with zero fill
CUTLASS_DEVICE
cp_async_zfill(void *smem_ptr, void const *global_ptr, bool pred_guard = true) {
#if CUDA_CP_ASYNC_ACTIVATED
static_assert(SizeInBytes == 16,
"cp.async only supports CacheOperation::Global when access size is 16B.");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
int src_in_bytes = (pred_guard ? SizeInBytes : 0);
asm volatile(
#if CUTLASS_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#else
"cp.async.cg.shared.global [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#endif
"l"(global_ptr), "n"(SizeInBytes), "r"(src_in_bytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
else {
AccessType zeros;
zeros.clear();
*static_cast<AccessType *>(smem_ptr) = zeros;
}
#endif
}
};
/// Full specialization for 16B accesses with CacheOperation::Global
template <>
struct cp_async_nan<16, CacheOperation::Global> {
static int const kSizeInBytes = 16;
/// Copy with NaN fill
CUTLASS_DEVICE
cp_async_nan(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if CUDA_CP_ASYNC_ACTIVATED
static __constant__ uint4 OOB_NAN_F16x8 = {OOB_NAN_F16x2, OOB_NAN_F16x2,
OOB_NAN_F16x2, OOB_NAN_F16x2};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.cg.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.cg.shared.global [%1], [%2], %3;\n"
#endif
" @!p st.shared.v4.u32 [%1], {%4, %5, %6, %7};\n"
"}\n"
:
: "r"((int)pred_guard), "r"(smem_int_ptr), "l"(global_ptr),
"n"(kSizeInBytes), "r"(OOB_NAN_F16x8.x), "r"(OOB_NAN_F16x8.y), "r"(OOB_NAN_F16x8.z),
"r"(OOB_NAN_F16x8.w));
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_UNUSED(global_ptr);
CUTLASS_UNUSED(pred_guard);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Establishes an ordering w.r.t. previously issued cp.async instructions. Does not block.
CUTLASS_DEVICE
void cp_async_fence() {
#if CUDA_CP_ASYNC_ACTIVATED
asm volatile("cp.async.commit_group;\n" ::);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Blocks until all but the <N> most recent cp.async.commit_group operations have completed.
template <int N>
CUTLASS_DEVICE void cp_async_wait() {
#if CUDA_CP_ASYNC_ACTIVATED
asm volatile("cp.async.wait_group %0;\n" ::"n"(N));
#endif
}
/// Blocks until all previous cp.async.commit_group operations have completed.
template <>
CUTLASS_DEVICE void cp_async_wait<0>() {
#if CUDA_CP_ASYNC_ACTIVATED
asm volatile("cp.async.wait_all;\n" ::);
#endif
}
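// Illustrative usage sketch (editorial, not part of the original header). A typical
// producer pattern issues one or more cp.async copies, commits them as a group with
// cp_async_fence(), and waits with cp_async_wait<>() before the tile is read. The
// 16-byte access size and CacheOperation::Global below are assumptions chosen only
// for this example.
//
//   CUTLASS_DEVICE
//   void copy_tile_example(void *smem_ptr, void const *gmem_ptr, bool guard) {
//     cutlass::arch::cp_async<16, cutlass::arch::CacheOperation::Global>(smem_ptr, gmem_ptr, guard);
//     cutlass::arch::cp_async_fence();    // commit the outstanding copies as one group
//     cutlass::arch::cp_async_wait<0>();  // block until all committed groups have completed
//     __syncthreads();                    // make the shared-memory tile visible to the whole CTA
//   }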
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/arch/memory_sm80.h/0 | {
"file_path": "include/cutlass/arch/memory_sm80.h",
"repo_id": "include",
"token_count": 6520
} | 16 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/conv/collective/builders/sm90_common.inl"
// SM90 Collective Builders should only be used starting with CUDA 12.0
#if (__CUDACC_VER_MAJOR__ >= 12)
#define CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::conv::collective {
using namespace cute;
namespace detail {
// Overrides the number of smem pipeline stages with the manually specified StageCount.
template<int CapacityBytes, class ElementA, class ElementB, class TileShapeMNK, int stages>
constexpr int
compute_stage_count_or_override(StageCount<stages> stage_count) {
return stages;
}
// Overrides the number of smem pipeline stages with the manually specified static stage count.
template<int CapacityBytes, class ElementA, class ElementB, class TileShapeMNK, int stages>
constexpr int
compute_stage_count_or_override(cute::Int<stages> stage_count) {
return stages;
}
// Returns the maximum number of smem tiles that can be used with a given smem capacity, minus the requested carveout.
template<int CapacityBytes, class ElementA, class ElementB, class TileShapeMNK, int carveout_bytes>
constexpr int
compute_stage_count_or_override(StageCountAutoCarveout<carveout_bytes> stage_count) {
constexpr auto mainloop_pipeline_bytes = sizeof(typename cutlass::PipelineTmaAsync<1>::SharedStorage);
constexpr auto a_bits = cute::sizeof_bits_v<ElementA>;
constexpr auto b_bits = cute::sizeof_bits_v<ElementB>;
constexpr int stage_bytes =
cutlass::bits_to_bytes(a_bits * size<0>(TileShapeMNK{}) * size<2>(TileShapeMNK{})) +
cutlass::bits_to_bytes(b_bits * size<1>(TileShapeMNK{}) * size<2>(TileShapeMNK{})) +
static_cast<int>(mainloop_pipeline_bytes);
return (CapacityBytes - carveout_bytes) / stage_bytes;
}
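// Worked example (editorial, not part of the original header), assuming half_t A/B
// operands, a 128x128x64 CTA tile, and zero carveout:
//   A tile: 16 bits * 128 * 64 = 131072 bits -> 16384 bytes
//   B tile: 16 bits * 128 * 64 = 131072 bits -> 16384 bytes
//   stage_bytes = 16384 + 16384 + sizeof(PipelineTmaAsync<1>::SharedStorage)
// With roughly 227 KB of shared memory usable per CTA on SM90, this works out to
// about 7 pipeline stages; the exact result depends on the pipeline storage size and
// on any carveout_bytes reserved for the epilogue.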
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA_TMA_WS_SS_FPROP
template <
conv::Operator ConvOp,
class ElementA,
class GmemLayoutA,
int AlignmentA,
class ElementB,
class GmemLayoutB,
int AlignmentB,
class ElementAccumulator,
class TileShape_MNK,
class ClusterShape_MNK,
class StageCountType,
class KernelScheduleType
>
struct CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
ConvOp,
ElementA,
GmemLayoutA,
AlignmentA,
ElementB,
GmemLayoutB,
AlignmentB,
ElementAccumulator,
TileShape_MNK,
ClusterShape_MNK,
StageCountType,
KernelScheduleType,
cute::enable_if_t<cute::is_same_v<KernelScheduleType, KernelImplicitTmaWarpSpecializedSm90> ||
cute::is_same_v<KernelScheduleType, KernelImplicitTmaWarpSpecializedSm90Cooperative> ||
cute::is_same_v<KernelScheduleType, KernelImplicitTmaWarpSpecializedSm90Pingpong>>
> {
static_assert(is_static<TileShape_MNK>::value);
static_assert(is_static<ClusterShape_MNK>::value);
#ifndef CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED
static_assert(cutlass::detail::dependent_false<ElementA>, "Unsupported Toolkit for SM90 Collective Builder\n");
#endif
static_assert(cutlass::gemm::collective::detail::is_aligned<ElementA, AlignmentA, ElementB, AlignmentB, cutlass::gemm::collective::detail::tma_alignment_bytes>(),
"Should meet TMA alignment requirement\n");
// For fp32 types, map to tf32 MMA value type
using ElementAMma = cute::conditional_t<cute::is_same_v<ElementA, float>, tfloat32_t, ElementA>;
using ElementBMma = cute::conditional_t<cute::is_same_v<ElementB, float>, tfloat32_t, ElementB>;
// For fprop, majorA = K,  majorB = K;
// For wgrad, majorA = MN, majorB = MN;
// For dgrad, majorA = K,  majorB = MN;
static constexpr cute::GMMA::Major GmmaMajorA =
(ConvOp == conv::Operator::kWgrad) ? cute::GMMA::Major::MN : cute::GMMA::Major::K;
static constexpr cute::GMMA::Major GmmaMajorB =
(ConvOp == conv::Operator::kFprop) ? cute::GMMA::Major::K : cute::GMMA::Major::MN;
using AtomLayoutMNK = cute::conditional_t<cute::is_same_v<KernelScheduleType, KernelImplicitTmaWarpSpecializedSm90Cooperative>,
Layout<Shape<_2,_1,_1>>, Layout<Shape<_1,_1,_1>>>;
using TiledMma = decltype(cute::make_tiled_mma(cute::GMMA::ss_op_selector<
ElementAMma, ElementBMma, ElementAccumulator, TileShape_MNK, GmmaMajorA, GmmaMajorB>(), AtomLayoutMNK{}));
// For the wgrad kernel, tensor A uses TMA tiled mode and tensor B uses TMA im2col mode; for fprop/dgrad the modes are swapped.
using GmemTiledCopyA = cute::conditional_t<ConvOp == conv::Operator::kWgrad,
decltype(cutlass::gemm::collective::detail::sm90_cluster_shape_to_tma_atom(cute::shape<1>(ClusterShape_MNK{}))),
decltype(cutlass::conv::collective::detail::sm90_cluster_shape_to_im2col_tma_atom(cute::shape<1>(ClusterShape_MNK{})))>;
using GmemTiledCopyB = cute::conditional_t<ConvOp == conv::Operator::kWgrad,
decltype(cutlass::conv::collective::detail::sm90_cluster_shape_to_im2col_tma_atom(cute::shape<0>(ClusterShape_MNK{}))),
decltype(cutlass::gemm::collective::detail::sm90_cluster_shape_to_tma_atom(cute::shape<0>(ClusterShape_MNK{})))>;
using SmemLayoutAtomA = decltype(cutlass::gemm::collective::detail::ss_smem_selector<
GmmaMajorA, ElementAMma, decltype(cute::get<0>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>());
using SmemLayoutAtomB = decltype(cutlass::gemm::collective::detail::ss_smem_selector<
GmmaMajorB, ElementBMma, decltype(cute::get<1>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>());
static constexpr int PipelineStages = detail::compute_stage_count_or_override<cutlass::gemm::collective::detail::sm90_smem_capacity_bytes,
ElementAMma, ElementBMma, TileShape_MNK>(StageCountType{});
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape_MNK{}), shape<2>(TileShape_MNK{}), Int<PipelineStages>{}),
Step<_2,_1,_3>{}));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape_MNK{}), shape<2>(TileShape_MNK{}), Int<PipelineStages>{}),
Step<_2,_1,_3>{}));
constexpr static int NumSpatialDimensions = cutlass::conv::collective::detail::gmem_layout_tags_to_spatial_dims<GmemLayoutA, GmemLayoutB>();
using DispatchPolicy = MainloopSm90TmaGmmaWarpSpecializedImplicitGemm<
ConvOp, PipelineStages, NumSpatialDimensions, ClusterShape_MNK, KernelScheduleType>;
using CollectiveOp = CollectiveConv<
DispatchPolicy,
TileShape_MNK,
ElementA,
ElementB,
TiledMma,
detail::Sm90ImplicitGemmTileTraits<GmemTiledCopyA, SmemLayoutA>,
detail::Sm90ImplicitGemmTileTraits<GmemTiledCopyB, SmemLayoutB>
>;
};
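// Illustrative instantiation sketch (editorial, not part of the original header).
// The element types, layout tags, alignments, tile/cluster shapes, and the
// StageCountAuto/KernelScheduleAuto aliases below are assumptions chosen only to show
// how the parameters of the builder above line up; they are not prescribed by this file.
//
//   using CollectiveMainloop = typename cutlass::conv::collective::CollectiveBuilder<
//       arch::Sm90, arch::OpClassTensorOp,
//       conv::Operator::kFprop,
//       cutlass::half_t, cutlass::layout::TensorNHWC, 8,   // A: element, layout, alignment
//       cutlass::half_t, cutlass::layout::TensorNHWC, 8,   // B: element, layout, alignment
//       float,                                             // accumulator
//       cute::Shape<cute::_128, cute::_128, cute::_64>,    // CTA tile (M, N, K)
//       cute::Shape<cute::_1, cute::_1, cute::_1>,         // cluster shape
//       StageCountAuto,
//       KernelScheduleAuto
//     >::CollectiveOp;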
/////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA auto kernel schedule
template <
conv::Operator ConvOp,
class ElementA,
class GmemLayoutA,
int AlignmentA,
class ElementB,
class GmemLayoutB,
int AlignmentB,
class ElementAccumulator,
class TileShape_MNK,
class ClusterShape_MNK,
class StageCountType,
class KernelScheduleType
>
struct CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
ConvOp,
ElementA,
GmemLayoutA,
AlignmentA,
ElementB,
GmemLayoutB,
AlignmentB,
ElementAccumulator,
TileShape_MNK,
ClusterShape_MNK,
StageCountType,
KernelScheduleType,
cute::enable_if_t<cute::is_same_v<KernelScheduleType, KernelScheduleAuto>>
> {
static_assert(is_static<TileShape_MNK>::value);
static_assert(is_static<ClusterShape_MNK>::value);
#ifndef CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED
static_assert(cutlass::detail::dependent_false<ElementA>, "Unsupported Toolkit for SM90 Collective Builder\n");
#endif
/*
#if ((__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 1)))
// The cooperative schedule performs best for CUDA Toolkits with version >= 12.1.
// For TileShape_M == 64, choose KernelImplicitTmaWarpSpecializedSm90Pingpong as the KernelSchedule,
// since KernelImplicitTmaWarpSpecializedSm90Cooperative requires TileShape_M to be at least 128.
using KernelWarpSpecializedSchedule = cute::conditional_t<size<0>(TileShape_MNK{}) == Int<64>{},
  KernelImplicitTmaWarpSpecializedSm90Pingpong, KernelImplicitTmaWarpSpecializedSm90Cooperative>;
#else
using KernelWarpSpecializedSchedule = KernelImplicitTmaWarpSpecializedSm90;
#endif
*/
using KernelWarpSpecializedSchedule = KernelImplicitTmaWarpSpecializedSm90;
using CollectiveOp = typename CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
ConvOp,
ElementA,
GmemLayoutA,
AlignmentA,
ElementB,
GmemLayoutB,
AlignmentB,
ElementAccumulator,
TileShape_MNK,
ClusterShape_MNK,
StageCountType,
KernelWarpSpecializedSchedule
>::CollectiveOp;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::conv::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/collective/builders/sm90_gmma_builder.inl/0 | {
"file_path": "include/cutlass/conv/collective/builders/sm90_gmma_builder.inl",
"repo_id": "include",
"token_count": 3814
} | 17 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dDgrad
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kStrided,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultConv2dDgrad;
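// Illustrative instantiation sketch (editorial, not part of the original header).
// The element types, tile shapes, epilogue, and swizzle below are assumptions chosen
// only to show how the template parameters line up; in practice these kernels are
// usually reached through device-level wrappers rather than by naming this trait directly.
//
//   using DgradKernel = typename cutlass::conv::kernel::DefaultConv2dDgrad<
//       cutlass::half_t, cutlass::layout::TensorNHWC,      // ElementA / LayoutA (output gradient)
//       cutlass::half_t, cutlass::layout::TensorNHWC,      // ElementB / LayoutB (filter)
//       cutlass::half_t, cutlass::layout::TensorNHWC,      // ElementC / LayoutC (input gradient)
//       float,                                             // ElementAccumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,            // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,              // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,               // instruction shape
//       cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                 // stages
//       cutlass::arch::OpMultiplyAdd,
//       cutlass::conv::IteratorAlgorithm::kOptimized,
//       cutlass::conv::StrideSupport::kUnity
//     >::Kernel;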
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassTensorOp convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dDgrad specialized for the Analytic IteratorAlgorithm,
/// strided dgrad, and a multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kStrided,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
StrideSupport::kStrided,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOpStridedDgrad<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/// Defines a kernel for Conv2dDgrad specialized for the Analytic IteratorAlgorithm,
/// strided dgrad, and a two-stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kStrided,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
StrideSupport::kStrided,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogueStridedDgrad<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dDgrad specialized for the Analytic IteratorAlgorithm,
/// unity-stride dgrad, and a multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kUnity,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
StrideSupport::kUnity,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/// Defines a kernel for Conv2dDgrad specialized for the Analytic IteratorAlgorithm,
/// unity-stride dgrad, and a two-stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kUnity,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
StrideSupport::kUnity,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dDgrad specialized for the Optimized IteratorAlgorithm,
/// unity-stride dgrad, and a multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kUnity,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
StrideSupport::kUnity,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/// Defines a kernel for Conv2dDgrad specialized for the Optimized IteratorAlgorithm,
/// strided dgrad, and a multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kStrided,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
StrideSupport::kStrided,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOpStridedDgrad<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/// Defines a kernel for Conv2dDgrad specialized for the Optimized IteratorAlgorithm,
/// strided dgrad, and a two-stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kStrided,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
StrideSupport::kStrided,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogueStridedDgrad<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/// Defines a kernel for Conv2dDgrad specialized for the Optimized IteratorAlgorithm,
/// unity-stride dgrad, and a two-stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kUnity,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
StrideSupport::kUnity,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dDgrad specialized for the Analytic IteratorAlgorithm,
/// a multi-stage pipeline, and an FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kUnity
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
conv::StrideSupport::kUnity
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
conv::StrideSupport::kStrided
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dDgrad specialized for the Optimized IteratorAlgorithm,
/// a multi-stage pipeline, and an FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kUnity
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
StrideSupport::kUnity
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
conv::StrideSupport::kStrided
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dDgrad specialized for the Analytic IteratorAlgorithm,
/// a two-stage pipeline, and an FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kUnity
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
conv::StrideSupport::kUnity
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
conv::StrideSupport::kStrided
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dDgrad specialization for Optimized IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kUnity
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
StrideSupport::kUnity
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dDgrad <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB,
conv::StrideSupport::kStrided
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDgrad
>;
};
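/////////////////////////////////////////////////////////////////////////////////////////////////
// A minimal instantiation sketch for the specializations above (illustrative only; the tile
// shapes, element types, epilogue, and swizzle chosen here are assumptions rather than a tuned
// configuration, and the defaulted alignment parameters are relied upon):
//
//   using DgradKernel = cutlass::conv::kernel::DefaultConv2dDgrad<
//     float, cutlass::layout::TensorNHWC,              // ElementA / LayoutA (output gradient)
//     float, cutlass::layout::TensorNHWC,              // ElementB / LayoutB (filter)
//     float, cutlass::layout::TensorNHWC,              // ElementC / LayoutC (input gradient)
//     float,                                           // ElementAccumulator
//     cutlass::arch::OpClassSimt,
//     cutlass::arch::Sm50,
//     cutlass::gemm::GemmShape<128, 128, 8>,           // ThreadblockShape
//     cutlass::gemm::GemmShape<32, 64, 8>,             // WarpShape
//     cutlass::gemm::GemmShape<1, 1, 1>,               // InstructionShape
//     cutlass::epilogue::thread::LinearCombination<float, 1, float, float>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     2,                                               // Stages
//     cutlass::arch::OpMultiplyAdd,
//     cutlass::conv::IteratorAlgorithm::kAnalytic,
//     cutlass::conv::StrideSupport::kUnity
//   >::Kernel;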
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_conv2d_dgrad.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_conv2d_dgrad.h",
"repo_id": "include",
"token_count": 18880
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity
>
class Conv3dDgradOutputGradientTileAccessIteratorOptimized {
public:
static_assert(StrideSupport_ == conv::StrideSupport::kUnity,
"Only unit-stride dgrad is supported at this time.");
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
using Coord3D = Coord<3>;
static int const kAccessesPerVector = 1;
using Mask = uint64_t;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dDgradOutputGradientIteratorOptimizedParams;
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (t, r, s)
int filter_t_;
int filter_r_;
int filter_s_;
int filter_k_;
Index masks_[ThreadMap::Iterations::kStrided][3];
public:
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorOptimized(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
filter_k_(0),
filter_t_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_d[ThreadMap::Iterations::kStrided];
int offset_h[ThreadMap::Iterations::kStrided];
int offset_w[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_ndhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
      // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// offset_n[s] = offset_ndhw / (problem_size_.D * problem_size_.H * problem_size_.W);
// int residual = offset_ndhw % (problem_size_.D * problem_size_.H * problem_size_.W);
//
//
// offset_d[s] = residual / (problem_size_.H * problem_size_.W);
// residual = residual % (problem_size_.H * problem_size_.W);
//
// offset_h[s] = residual / problem_size_.W;
// offset_w[s] = residual % problem_size_.W;
//
int residual;
      // input: (ndhw offset)  output: (n offset and residual (dhw offset))
params_.dhw_divmod(offset_n[s], residual, offset_ndhw);
      // input: (dhw offset)   output: (d offset and residual (hw offset))
params_.hw_divmod(offset_d[s], residual, residual);
      // input: (hw offset)    output: (h offset and residual (w offset))
params_.w_divmod(offset_h[s], offset_w[s], residual);
TensorCoord coord = at_(offset_n[s], offset_d[s], offset_h[s], offset_w[s], 0, 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
CUTLASS_PRAGMA_NO_UNROLL
for (int t = 0; t < problem_size_.T; ++t) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int t_ = t;
if (problem_size_.mode == Mode::kConvolution) {
t_ = problem_size_.T - 1 - t;
}
int z = offset_d[s_idx] + problem_size_.pad_d - t_ * problem_size_.dilation_d;
bool pred = (offset_n[s_idx] < problem_size_.N && z >= 0 && z < problem_size_.Z);
masks_[s_idx][0] |= (pred << t);
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int p = offset_h[s_idx] + problem_size_.pad_h - r_ * problem_size_.dilation_h;
bool pred = (p >= 0 && p < problem_size_.P);
masks_[s_idx][1] |= (pred << r);
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int q = offset_w[s_idx] + problem_size_.pad_w - s_ * problem_size_.dilation_w;
bool pred = (q >= 0 && q < problem_size_.Q);
masks_[s_idx][2] |= (pred << s);
}
}
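    // At this point each masks_[s_idx] holds one predicate bit per filter position:
    // masks_[s_idx][0] covers T, masks_[s_idx][1] covers R, and masks_[s_idx][2] covers S.
    // For example (illustrative values only), with problem_size_.R == 3 and only r == 0 and
    // r == 2 mapping into [0, P), masks_[s_idx][1] == 0b101; valid() later tests the bit
    // selected by the current filter_r_.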
if (filter_k_ >= problem_size.K) {
clear_mask();
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
  /// Returns the coordinate in the output gradient tensor dy that corresponds to
  /// activation ndhw and filter position k, t, r, s
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int d, int h, int w, int t, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
t = problem_size_.T - 1 - t;
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int z = d + problem_size_.pad_d - t * problem_size_.dilation_d;
int p = h + problem_size_.pad_h - r * problem_size_.dilation_h;
int q = w + problem_size_.pad_w - s * problem_size_.dilation_w;
return TensorCoord(n, z, p, q, filter_k_);
}
  /// Adds a byte offset to each internal pointer
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask_(bool clear) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
      // We are using inline PTX assembly here to avoid a CUDA C++ compilation
// artifact in which control flow instructions are generated. Instead, our
// intent is to predicate the mov instructions.
#if defined(__CUDA_ARCH__)
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][0])
:
"r"((int)clear),
"r"(masks_[s][0])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][1])
:
"r"((int)clear),
"r"(masks_[s][1])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][2])
:
"r"((int)clear),
"r"(masks_[s][2])
);
#else
if (clear) {
masks_[s][0] = 0;
masks_[s][1] = 0;
masks_[s][2] = 0;
}
#endif
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
next_idx = 1;
if (filter_r_ == problem_size_.R) {
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
next_idx = 2;
}
else {
filter_t_ = 0;
next_idx = 3;
}
}
}
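    // next_idx selects a precomputed pointer delta in params_.inc_next:
    //   0 - step to the next S position, 1 - S wrapped, step to the next R,
    //   2 - R wrapped, step to the next T, 3 - T wrapped, step to the next K tile.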
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 3) {
filter_k_ += params_.filter_k_delta;
}
clear_mask_(filter_k_ >= problem_size_.K);
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask() {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][0] = Mask(0);
masks_[s][1] = Mask(0);
masks_[s][2] = Mask(0);
}
}
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][0] & (Index(1) << filter_t_)) &&
(masks_[iteration_strided_][1] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][2] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// This is specialized for unit stride
if (problem_size.stride() != Coord3D({1, 1, 1})) {
return Status::kErrorNotSupported;
}
// check alignment constraint on iterator's contiguous dimension
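    // (for example, with a 16-bit element this requires K to be a multiple of 128 / 16 = 8)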
if (problem_size.K % (128/sizeof_bits<Element>::value)) {
return Status::kErrorNotSupported;
}
// Limit on filter size
if (problem_size.T > 32 || problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
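/////////////////////////////////////////////////////////////////////////////////////////////////
// A minimal traversal sketch for this iterator (illustrative only; `Iterator` is assumed to be
// an alias of this class with concrete Shape/Element/ThreadMap bindings, and `params`,
// `problem_size`, `ptr`, and `tb_offset` are assumed to be provided by the enclosing mainloop):
//
//   Iterator iter(params, problem_size, ptr, thread_idx, tb_offset);
//   CUTLASS_PRAGMA_UNROLL
//   for (int access = 0; access < Iterator::ThreadMap::Iterations::kCount; ++access) {
//     if (iter.valid()) {
//       Iterator::AccessType const *src = iter.get();   // vectorized global-memory access
//       // ... copy *src into the destination fragment ...
//     }
//     ++iter;
//   }
//   iter.advance();   // step to the next (t, r, s) filter position or the next K tile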
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 6116
} | 19 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
//////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue {
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
//
// Builder Epilogue Schedules
//
//////////////////////////////////////////////////////////////////////////////
struct NoSmemWarpSpecialized {};
struct PtrArrayNoSmemWarpSpecialized {};
struct TmaWarpSpecialized {};
struct TmaWarpSpecializedCooperative {};
// DEPRECATED schedules, will be removed in next release
struct TmaWarpSpecializedElementwiseBase : public TmaWarpSpecialized {};
struct TmaWarpSpecializedCooperativeElementwiseBase : public TmaWarpSpecializedCooperative {};
template <
template <class T> class ActivationFunctor_,
thread::ScaleType::Kind Scale_ = thread::ScaleType::Default,
FloatRoundStyle Round_ = FloatRoundStyle::round_to_nearest
>
struct [[deprecated("Use TmaWarpSpecialized with fusion::LinCombEltAct instead")]]
TmaWarpSpecializedElementwise : public TmaWarpSpecializedElementwiseBase {
template <class T>
using ActivationFunctor = ActivationFunctor_<T>;
static constexpr thread::ScaleType::Kind Scale = Scale_;
static constexpr FloatRoundStyle Round = Round_;
};
template <
template <class T> class ActivationFunctor_,
thread::ScaleType::Kind Scale_ = thread::ScaleType::Default,
FloatRoundStyle Round_ = FloatRoundStyle::round_to_nearest
>
struct [[deprecated("Use TmaWarpSpecializedCooperative with fusion::LinCombEltAct instead")]]
TmaWarpSpecializedCooperativeElementwise : public TmaWarpSpecializedCooperativeElementwiseBase {
template <class T>
using ActivationFunctor = ActivationFunctor_<T>;
static constexpr thread::ScaleType::Kind Scale = Scale_;
static constexpr FloatRoundStyle Round = Round_;
};
struct TmaWarpSpecializedBiasElementwiseBase : public TmaWarpSpecialized{};
struct TmaWarpSpecializedCooperativeBiasElementwiseBase : public TmaWarpSpecializedCooperative {};
template <
template <class T> class ActivationFunctor_,
class ElementT_,
template <class T> class BiasOp_,
bool StoreT_,
class ElementBias_
>
struct [[deprecated("Use TmaWarpSpecialized with fusion::LinCombPerRowBiasEltActAux instead")]]
TmaWarpSpecializedBiasElementwise : public TmaWarpSpecializedBiasElementwiseBase {
template <class T>
using ActivationFunctor = ActivationFunctor_<T>;
using ElementT = ElementT_;
template <class T>
using BiasOp = BiasOp_<T>;
static constexpr bool StoreT = StoreT_;
using ElementBias = ElementBias_;
};
template <
template <class T> class ActivationFunctor_,
class ElementT_,
template <class T> class BiasOp_,
bool StoreT_,
class ElementBias_
>
struct [[deprecated("Use TmaWarpSpecializedCooperative with fusion::LinCombPerRowBiasEltActAux instead")]]
TmaWarpSpecializedCooperativeBiasElementwise : public TmaWarpSpecializedCooperativeBiasElementwiseBase {
template <class T>
using ActivationFunctor = ActivationFunctor_<T>;
using ElementT = ElementT_;
template <class T>
using BiasOp = BiasOp_<T>;
static constexpr bool StoreT = StoreT_;
using ElementBias = ElementBias_;
};
//////////////////////////////////////////////////////////////////////////////
//
// Collective Dispatch Policies
//
//////////////////////////////////////////////////////////////////////////////
template<
int StagesC_,
int StagesD_,
int FragmentSize_,
bool ReuseSmemC_,
bool DelayTmaStore_
>
struct Sm90TmaWarpSpecialized {
constexpr static int StagesC = StagesC_;
constexpr static int StagesD = StagesD_;
constexpr static int FragmentSize = FragmentSize_;
constexpr static bool ReuseSmemC = ReuseSmemC_;
constexpr static bool DelayTmaStore = DelayTmaStore_;
};
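// A minimal instantiation sketch (illustrative only; the stage counts and fragment size below
// are assumptions, not tuned values):
//
//   using EpilogueDispatchPolicy = cutlass::epilogue::Sm90TmaWarpSpecialized<
//     /* StagesC_       = */ 4,
//     /* StagesD_       = */ 2,
//     /* FragmentSize_  = */ 4,
//     /* ReuseSmemC_    = */ true,
//     /* DelayTmaStore_ = */ false>;
//   static_assert(EpilogueDispatchPolicy::StagesD == 2, "sanity check");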
// DEPRECATED policies, will be removed in next release
template<
int StagesC_,
int StagesD_,
int FragmentSize_ = 2
>
struct Sm90TmaWarpSpecializedBiasElementwise {
constexpr static int StagesC = StagesC_;
constexpr static int StagesD = StagesD_;
constexpr static int FragmentSize = FragmentSize_;
};
//////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue
| include/cutlass/epilogue/dispatch_policy.hpp/0 | {
"file_path": "include/cutlass/epilogue/dispatch_policy.hpp",
"repo_id": "include",
"token_count": 1702
} | 20 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Threadblock-scoped epilogue that invokes a tree of visitor callbacks to perform fused
           elementwise operations.
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/epilogue/threadblock/epilogue_base.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
namespace detail {
struct EVT2xBase { };
template <class T>
static constexpr bool is_2x_evt_v = platform::is_base_of<EVT2xBase, T>::value;
} // namespace detail
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename DefaultEpilogue, ///< Default Epilogue Descriptor
typename FusionCallbacks_, ///< The called fusion callbacks
int Stages = 2, ///< Software pipeline stages for epilogue
int IterationsUnroll = true ///< Used to reduce binary size when epilogue op is large
>
class EpilogueWithVisitorCallbacks :
public EpilogueBase<
typename DefaultEpilogue::Shape,
typename DefaultEpilogue::WarpMmaOperator::Shape,
DefaultEpilogue::kPartitionsK,
typename DefaultEpilogue::AccumulatorFragmentIterator,
typename DefaultEpilogue::WarpTileIterator,
typename DefaultEpilogue::Padding,
DefaultEpilogue::kFragmentsPerIteration>,
public EpilogueBaseStreamK<
typename DefaultEpilogue::Shape,
DefaultEpilogue::kPartitionsK,
typename DefaultEpilogue::WarpMmaOperator,
typename DefaultEpilogue::AccumulatorFragmentIterator>,
public detail::EVT2xBase
{
public:
  static_assert(Stages <= 2, "Sm80 EVT only supports up to 2 stages.");
// Whether the epilogue is pipelined
static bool constexpr Pipelined = Stages > 1;
using FusionCallbacks = FusionCallbacks_;
using OutputTileIterator = typename DefaultEpilogue::OutputTileIterator;
// Number of epilogue iterations.
  // Each iteration processes an 8 x ThreadblockTile::kN output tile
static const int kIterations = OutputTileIterator::kIterations;
using Base = EpilogueBase<
typename DefaultEpilogue::Shape,
typename DefaultEpilogue::WarpMmaOperator::Shape,
DefaultEpilogue::kPartitionsK,
typename DefaultEpilogue::AccumulatorFragmentIterator,
typename DefaultEpilogue::WarpTileIterator,
typename DefaultEpilogue::Padding,
DefaultEpilogue::kFragmentsPerIteration>;
using BaseStreamK = EpilogueBaseStreamK<
typename DefaultEpilogue::Shape,
DefaultEpilogue::kPartitionsK,
typename DefaultEpilogue::WarpMmaOperator,
typename DefaultEpilogue::AccumulatorFragmentIterator>;
static int const kPartitionsK = DefaultEpilogue::kPartitionsK;
using AccumulatorFragmentIterator = typename DefaultEpilogue::AccumulatorFragmentIterator;
using WarpTileIterator = typename DefaultEpilogue::WarpTileIterator;
using SharedLoadIterator = typename DefaultEpilogue::SharedLoadIterator;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
struct OutputOp{
using ElementAccumulator = ElementAccumulator;
using Params = typename FusionCallbacks::Arguments;
};
/// Fragment type used by the accumulator tile's fragment iterator
using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment;
// Output access size
static int const kElementsPerAccess = DefaultEpilogue::kElementsPerAccess;
/// Array type used by output functor
using AccumulatorAccessType = Array<
typename WarpTileIterator::Element, kElementsPerAccess>;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
using Params = typename FusionCallbacks::Params;
static size_t constexpr kSmemStageOffset = sizeof(Base::SharedStorage) / sizeof(ElementAccumulator);
static int constexpr kAccumulatorFragmentCount = AccumulatorTile::kElements / (kIterations * AccumulatorAccessType::kElements) / kPartitionsK;
struct SharedStorage {
typename Base::SharedStorage acc_smem[Stages];
typename FusionCallbacks::SharedStorage callback_smem;
};
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
FusionCallbacks fusion_callbacks;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithVisitorCallbacks(
const Params ¶ms_callbacks, ///< Epilogue Visitor params
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.acc_smem[0], thread_idx, warp_idx, lane_idx),
BaseStreamK(thread_idx),
shared_load_iterator_(shared_storage.acc_smem[0].reference(), thread_idx),
fusion_callbacks(params_callbacks, shared_storage.callback_smem)
{ }
/// Aggregates the accumulator sets shared by peer blocks in the global workspace,
/// performing epilogue computations, writing to output
template <class ProblemShape>
CUTLASS_DEVICE
void reduce(
int peer_idx_begin,
int peer_idx_end,
int reduce_fragment_idx,
void *element_workspace,
cutlass::gemm::GemmCoord threadblock_tile_offset,
ProblemShape problem_shape,
int thread_idx)
{
auto callbacks = fusion_callbacks.get_callbacks(
threadblock_tile_offset,
thread_idx,
problem_shape
);
callbacks.begin_epilogue();
// Reduce peer accumulator fragments into one fragment
AccumulatorFragment accum_fragment;
BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace);
// Store fragment to shared memory
this->warp_tile_iterator_.store(accum_fragment);
__syncthreads();
callbacks.begin_step(reduce_fragment_idx);
// Load fragment from shared memory
typename SharedLoadIterator::Fragment aligned_accum_fragment;
shared_load_iterator_.load(aligned_accum_fragment);
// Add fragments shared by other k partitions
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
typename SharedLoadIterator::Fragment aligned_addend_fragment;
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_addend_fragment);
aligned_accum_fragment = add_fragments(aligned_accum_fragment, aligned_addend_fragment);
}
}
//
// Iterate over output fragment
//
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const*>(&aligned_accum_fragment);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) {
int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn;
int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn;
// Start a new row of the output fragment
if (!col_idx) {
callbacks.begin_row(row_idx);
}
callbacks.visit(
reduce_fragment_idx,
row_idx,
col_idx,
idx,
accum_frag_ptr[idx]
);
// End the row of the output fragment
if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) {
callbacks.end_row(row_idx);
}
}
callbacks.end_step(reduce_fragment_idx);
callbacks.end_epilogue();
}
/// Streams the result to global memory
template <class ProblemShape>
CUTLASS_DEVICE
  void operator()(
    AccumulatorTile const &accumulators,
    cutlass::gemm::GemmCoord threadblock_tile_offset,  ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles)
    ProblemShape problem_shape,
    int thread_idx
  ) {
auto callbacks = fusion_callbacks.get_callbacks(
threadblock_tile_offset,
thread_idx,
problem_shape
);
callbacks.begin_epilogue();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
if constexpr(Pipelined){
__syncthreads();
//
// Pipeline Prologue
//
size_t warp_iterator_offset = kSmemStageOffset;
size_t smem_iterator_offset = kSmemStageOffset;
callbacks.begin_step(0);
acc2smem_source_needed<cutlass::make_index_sequence<kIterations>>::push(
0, accum_fragment_iterator, this->warp_tile_iterator_);
this->warp_tile_iterator_.add_pointer_offset(warp_iterator_offset);
warp_iterator_offset = -warp_iterator_offset;
//
// Pipeline Loop
//
#pragma unroll(IterationsUnroll ? kIterations : 1)
for (int iter_idx = 1; iter_idx < kIterations + 1; ++iter_idx) {
__syncthreads();
// Skip the load for epilogue
if (iter_idx < kIterations) {
callbacks.begin_step(iter_idx);
acc2smem_source_needed<cutlass::make_index_sequence<kIterations>>::push(
iter_idx, accum_fragment_iterator, this->warp_tile_iterator_);
this->warp_tile_iterator_.add_pointer_offset(warp_iterator_offset);
warp_iterator_offset = -warp_iterator_offset;
}
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
shared_load_iterator_.add_pointer_offset(smem_iterator_offset);
smem_iterator_offset = -smem_iterator_offset;
//
// Iterate over output fragments
//
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) {
int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn;
int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn;
// Start a new row of the output fragment
if (!col_idx) {
callbacks.begin_row(row_idx);
}
callbacks.visit(
iter_idx-1,
row_idx,
col_idx,
idx,
accum_frag_ptr[idx]
);
// End the row of the output fragment
if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) {
callbacks.end_row(row_idx);
}
}
//
// Conclude the step
//
callbacks.end_step(iter_idx-1);
}
} else {
#pragma unroll(IterationsUnroll ? kIterations : 1)
for (int iter_idx = 0; iter_idx < kIterations; ++iter_idx) {
//
// Load the source
//
callbacks.begin_step(iter_idx);
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<kIterations>>::push(
iter_idx, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Iterate over output fragments
//
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment[0]);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) {
int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn;
int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn;
// Start a new row of the output fragment
if (!col_idx) {
callbacks.begin_row(row_idx);
}
callbacks.visit(
iter_idx,
row_idx,
col_idx,
idx,
accum_frag_ptr[idx]
);
// End the row of the output fragment
if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) {
callbacks.end_row(row_idx);
}
}
//
// Conclude the step
//
callbacks.end_step(iter_idx);
}
}
callbacks.end_epilogue();
}
private:
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
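      // Pack expansion dispatches on a runtime `pos`: for each Seq the left-hand comparison
      // short-circuits the comma expression unless pos == Seq, so exactly one helper<Seq>()
      // executes.  The array itself is a throwaway whose only purpose is to expand the pack.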
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
}
};
};
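/////////////////////////////////////////////////////////////////////////////////////////////////
// Callback sequence issued by operator() above (illustrative summary of the code, not an
// additional API):
//
//   callbacks.begin_epilogue();
//   for each epilogue iteration i:
//     callbacks.begin_step(i);
//     for each output row r:
//       callbacks.begin_row(r);
//       for each column c:  callbacks.visit(i, r, c, idx, accum_frag_ptr[idx]);
//       callbacks.end_row(r);
//     callbacks.end_step(i);
//   callbacks.end_epilogue();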
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h",
"repo_id": "include",
"token_count": 6585
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Parameter structures and descriptors used by epilogue predicated tile iterators.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
struct OutputTileShapeDesc {
int column;
int row;
int group;
int cluster;
int tile;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
OutputTileShapeDesc(): column(0), row(0), group(0), cluster(0), tile(0) { }
/// Ctor
CUTLASS_HOST_DEVICE
OutputTileShapeDesc(
int column_,
int row_,
int group_,
int cluster_,
int tile_
):
column(column_),
row(row_),
group(group_),
cluster(cluster_),
tile(tile_) { }
/// Total number of points in the 5D space
CUTLASS_HOST_DEVICE
int count() const {
return column * row * group * cluster * tile;
}
#if 0
CUTLASS_HOST_DEVICE
void print() const {
printf("{%d, %d, %d, %d, %d}", column, row, group, cluster, tile);
}
#endif
};
/// Helper template to construct an OutputTileShapeDesc from an OutputTileShape template.
template <typename Shape>
CUTLASS_HOST_DEVICE
OutputTileShapeDesc make_OutputTileShapeDesc() {
return OutputTileShapeDesc(
Shape::kColumn,
Shape::kRow,
Shape::kGroup,
Shape::kCluster,
Shape::kTile
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Thread map description
struct OutputTileThreadMapDesc {
int threads;
int elements_per_access;
OutputTileShapeDesc shape;
OutputTileShapeDesc iterations;
OutputTileShapeDesc delta;
OutputTileShapeDesc count;
//
// Methods
//
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc() { }
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc(
int threads_,
int elements_per_access_,
OutputTileShapeDesc shape_,
OutputTileShapeDesc iterations_,
OutputTileShapeDesc delta_,
OutputTileShapeDesc count_
):
threads(threads_),
elements_per_access(elements_per_access_),
shape(shape_),
iterations(iterations_),
delta(delta_),
count(count_)
{
}
};
/// Helper template to construct an OutputTileThreadMapDesc from an OutputTileThreadMap template.
template <typename ThreadMap>
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc make_OutputTileThreadMapDesc() {
return OutputTileThreadMapDesc(
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
make_OutputTileShapeDesc<typename ThreadMap::Shape>(),
make_OutputTileShapeDesc<typename ThreadMap::Iterations>(),
make_OutputTileShapeDesc<typename ThreadMap::Delta>(),
make_OutputTileShapeDesc<typename ThreadMap::Count>()
);
}
///////////////////////////////////////////////////////////////////////////////
//
// Parameters struct for PredicatedTileIterator
//
struct PredicatedTileIteratorParams {
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
LongIndex stride; ///< stride in bytes between rows
LongIndex increment_row; ///< increment quantity (in bytes) to advance when moving between rows
LongIndex increment_group; ///< increment quantity (in bytes) to advance when moving to the next group
LongIndex increment_cluster; ///< increment quantity (in bytes) to advance when moving to the next cluster
LongIndex advance_row; ///< amount to add to move to the next 'row' position
LongIndex advance_group; ///< amount to add to move to the next 'group' position
LongIndex advance_cluster; ///< amount to add to move to the next 'cluster' position
LongIndex advance_tile; ///< amount to add to move to the next 'tile'
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride_, OutputTileThreadMapDesc thread_map) {
stride = stride_;
increment_row = stride * thread_map.delta.row;
increment_group = stride * thread_map.delta.group
- stride * thread_map.delta.row * (thread_map.iterations.row - 1);
increment_cluster = stride * thread_map.delta.cluster
- stride * thread_map.delta.group * (thread_map.iterations.group - 1)
- stride * thread_map.delta.row * (thread_map.iterations.row - 1);
advance_row = stride * thread_map.shape.row;
advance_group =
stride *
(thread_map.shape.group - 1) * thread_map.shape.row * thread_map.count.row;
advance_cluster =
stride *
thread_map.count.group *
thread_map.shape.group *
thread_map.count.row *
thread_map.shape.row;
advance_tile =
stride *
thread_map.shape.group *
thread_map.shape.row *
thread_map.shape.cluster *
thread_map.shape.tile;
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Status initialize(Index stride_, OutputTileThreadMapDesc thread_map) {
return initialize(LongIndex(stride_), thread_map);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorParams() {
initialize(LongIndex(0), OutputTileThreadMapDesc());
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorParams(Index stride, OutputTileThreadMapDesc thread_map) {
initialize(stride, thread_map);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorParams(LongIndex stride, OutputTileThreadMapDesc thread_map) {
initialize(stride, thread_map);
}
};
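///////////////////////////////////////////////////////////////////////////////
// Worked example of the increment arithmetic above (illustrative numbers only): with
// stride == 1024 bytes, delta == {row: 8, group: 32, cluster: 128} and
// iterations == {row: 2, group: 2}, initialize() produces
//   increment_row     = 1024 * 8                                             = 8192
//   increment_group   = 1024 * 32  - 1024 * 8 * (2 - 1)                      = 24576
//   increment_cluster = 1024 * 128 - 1024 * 32 * (2 - 1) - 1024 * 8 * (2 - 1) = 90112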
///////////////////////////////////////////////////////////////////////////////
//
// Parameters struct for PredicatedTileIteratorDirect2dConv
//
struct PredicatedTileIteratorDirect2dConvParams{
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
FastDivmod pq_divmod;
FastDivmod q_divmod;
LongIndex stride;
LongIndex stride_n;
LongIndex stride_p;
int N;
int P;
int Q;
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride_,
cutlass::conv::Conv2dProblemSize const &problem_size,
MatrixCoord threadblock_output_shape) {
stride = stride_; // The stride per row of output tensor (bytes)
stride_n = problem_size.P * problem_size.Q;
stride_p = problem_size.Q ;
N = problem_size.N;
P = problem_size.P;
Q = problem_size.Q;
// Fastdivmod for output O, P, Q
    if (threadblock_output_shape.row() != 0 && threadblock_output_shape.column() != 0) {
// MSVC emits a "potential divide by 0" warning as error
// if the code just divides without a check and substitution.
CUTLASS_ASSERT(threadblock_output_shape.row() != 0);
const auto row_denom = threadblock_output_shape.row() != 0 ?
threadblock_output_shape.row() : cutlass::MatrixCoord::Index(1);
int tiles_p =
(problem_size.P + (threadblock_output_shape.row() - 1)) / row_denom;
CUTLASS_ASSERT(threadblock_output_shape.column() != 0);
const auto col_denom = threadblock_output_shape.column() != 0 ?
threadblock_output_shape.column() : cutlass::MatrixCoord::Index(1);
int tiles_q = (problem_size.Q + (threadblock_output_shape.column() - 1)) /
col_denom;
pq_divmod = FastDivmod(tiles_p * tiles_q);
q_divmod = FastDivmod(tiles_q);
}
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Status initialize(
Index stride_,
cutlass::conv::Conv2dProblemSize const &problem_size = cutlass::conv::Conv2dProblemSize(),
MatrixCoord threadblock_output_shape = MatrixCoord()) {
return initialize(LongIndex(stride_), problem_size, threadblock_output_shape);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirect2dConvParams() { initialize(LongIndex(0)); }
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirect2dConvParams(Index stride,
cutlass::conv::Conv2dProblemSize const &problem_size,
MatrixCoord threadblock_output_shape) {
initialize(stride, problem_size, threadblock_output_shape);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirect2dConvParams(LongIndex stride,
cutlass::conv::Conv2dProblemSize const &problem_size,
MatrixCoord threadblock_output_shape) {
initialize(stride, problem_size, threadblock_output_shape);
}
};
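///////////////////////////////////////////////////////////////////////////////
// Worked example of the tile-count setup above (illustrative numbers only): with
// problem_size.P == 28, problem_size.Q == 28 and threadblock_output_shape == {8, 16},
// tiles_p == (28 + 7) / 8 == 4 and tiles_q == (28 + 15) / 16 == 2, so
// pq_divmod wraps 4 * 2 == 8 and q_divmod wraps 2.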
///////////////////////////////////////////////////////////////////////////////
// InterleavedPredicatedTileIterator
///////////////////////////////////////////////////////////////////////////////
/// Predicated tile access iterator descriptor object containing template dependent state
struct InterleavedPredicatedTileIteratorDesc {
int element_size_bits;
int elements_per_access;
int threadmap_warp_size;
layout::PitchLinearCoord threadmap_iterations;
layout::PitchLinearCoord threadmap_delta;
//
// Methods
//
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc() { }
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc(
int element_size_bits_,
int elements_per_access_,
int threadmap_warp_size_,
layout::PitchLinearCoord threadmap_iterations_,
layout::PitchLinearCoord threadmap_delta_
):
element_size_bits(element_size_bits_),
elements_per_access(elements_per_access_),
threadmap_warp_size(threadmap_warp_size_),
threadmap_iterations(threadmap_iterations_),
threadmap_delta(threadmap_delta_) { }
};
//
// Parameters struct for InterleavedPredicatedTileIterator
//
struct InterleavedPredicatedTileIteratorParams {
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
LongIndex stride; ///< stride in bytes between rows
LongIndex advance_row; ///< amount to add to move to the next 'row' position
LongIndex advance_column; ///< amount to add to move to the next 'column' position
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride_, InterleavedPredicatedTileIteratorDesc desc) {
stride = stride_;
advance_row = desc.threadmap_delta.contiguous() * desc.element_size_bits / 8;
advance_column = stride_ - desc.threadmap_iterations.contiguous() *
desc.elements_per_access *
desc.element_size_bits *
desc.threadmap_warp_size / 8;
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorParams() {
initialize(LongIndex(0), InterleavedPredicatedTileIteratorDesc());
}
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorParams(Index stride, InterleavedPredicatedTileIteratorDesc desc) {
initialize(stride, desc);
}
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorParams(LongIndex stride, InterleavedPredicatedTileIteratorDesc desc) {
initialize(stride, desc);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper template to construct an InterleavedPredicatedTileIteratorDesc from Element and ThreadMap templates.
template <typename Element, typename ThreadMap>
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc make_InterleavedPredicatedTileIteratorDesc() {
return InterleavedPredicatedTileIteratorDesc(
sizeof_bits<Element>::value,
ThreadMap::kElementsPerAccess,
ThreadMap::kWarpSize,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper template to construct a predicated tile iterator descriptor from
/// template-dependent state
template <typename Element, typename Layout,
typename ThreadMap>
struct MakePredicatedTileIteratorDesc;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for layout::RowMajor output data.
template <typename Element, typename ThreadMap>
struct MakePredicatedTileIteratorDesc <
Element, layout::RowMajor, ThreadMap> {
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc operator()() {
return make_OutputTileThreadMapDesc<ThreadMap>();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for layout::ColumnMajorInterleaved<InterleavedN> output data.
template <typename Element, typename ThreadMap, int InterleavedN>
struct MakePredicatedTileIteratorDesc <
Element, layout::ColumnMajorInterleaved<InterleavedN>, ThreadMap> {
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc operator()() {
return make_InterleavedPredicatedTileIteratorDesc<Element, ThreadMap>();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_params.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_params.h",
"repo_id": "include",
"token_count": 4942
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
// This is an optimization available on CUDA 11.2 and beyond that eliminates branches in the epilogue.
#define CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED ((__CUDACC_VER_MAJOR__ * 10 + __CUDACC_VER_MINOR__) >= 112)
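// For example, nvcc 11.2 reports __CUDACC_VER_MAJOR__ = 11 and __CUDACC_VER_MINOR__ = 2, so the
// expression evaluates to 112 and the optimization is enabled for CUDA 11.2 and newer toolkits.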
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory. This is optimized
/// for mixed-precision epilogues in which the accumulators are 32b in width, but the output
/// data type is smaller.
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element_, ///< data type of accumulator element
int ElementSizeBits, ///< Size of accumulator element in bits
int OutputSizeBits, ///< Size of output element in bits
int OutputElementCount, ///< number of elements in output vector
int ContiguousLanes, ///< Number of consecutive lanes writing to contiguous memory
bool EightBitsOutputOrLess = (OutputSizeBits <= 8)
>
class TileIteratorTensorOpMixed {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kOutputElementCount = OutputElementCount;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount =
(OutputElementCount * sizeof_bits<Element>::value) / (const_min(128, OutputElementCount * sizeof_bits<Element>::value));
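    // Worked example (illustrative): with OutputElementCount = 16 and a 32b accumulator type,
    // 16 * 32 = 512 bits of accumulator data yield 512 / min(128, 512) = 4 pointers, i.e. one
    // pointer per 128b of shared memory written.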
    // Currently supports at most four pointers
static constexpr int kMaxPointerCount{4};
static_assert(kPointerCount <= kMaxPointerCount, "Can only accommodate four pointers at present.");
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<
0,
Detail::kLanesInQuad * Policy::kElementsPerAccess>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount] = {nullptr};
/// Stride in units of AccessType
int stride_{0};
/// Logical column in which warp tile is aligned
int warp_column_{0};
public:
/// Default constructor
TileIteratorTensorOpMixed() = default;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / Policy::kElementsPerAccess),
warp_column_(0) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
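      // The ((lane_in_quad / 2) + i) rotation below staggers which column group each of the
      // kPointerCount pointers starts from for a given lane, presumably so that neighboring
      // lanes write different shared memory banks when the 32b accumulators are later read
      // back as narrower output elements.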
int column_idx = (lane_in_quad % 2) + (((lane_in_quad / 2) + i) % Detail::kPointerCount) * 2;
ptr += column_idx;
pointers_[i % Detail::kPointerCount] = ptr;
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / Policy::kElementsPerAccess;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess;
}
warp_column_ += tile_offset.column() * Shape::kColumn;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
AccessType *ptr = pointers_[0];
#if CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED
// When the optimization is enabled, small tiles require separate logic.
bool kN32_optimization = (WarpShape::kN * Detail::kLanesInQuad * Policy::kElementsPerAccess * sizeof_bits<Element>::value) % 1024 == 0;
if (kN32_optimization) {
int ptr_idx = ((warp_column_ * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount;
if (ptr_idx == 0) {
ptr = pointers_[0];
} else if (ptr_idx == 1) {
ptr = pointers_[1];
} else if (ptr_idx == 2) {
ptr = pointers_[2];
} else if (ptr_idx == 3) {
ptr = pointers_[3];
}
}
#endif
CUTLASS_PRAGMA_UNROLL
for (int64_t n = 0; n < Policy::OperatorCount::kColumn; ++n) {
#if CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED
//
// When the optimization is enabled, this expression suffices to obtain the SMEM pointer.
//
if (WarpShape::kN == 64) {
ptr = pointers_[n / 4];
}
else if (!kN32_optimization)
#endif
{
// This is the reference implementation
int column_idx = warp_column_ + n * Detail::kLanesInQuad * Policy::kElementsPerAccess;
int ptr_idx = ((column_idx * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount;
if (ptr_idx == 0) {
ptr = pointers_[0 % Detail::kPointerCount];
}
else if (ptr_idx == 1) {
ptr = pointers_[1 % Detail::kPointerCount];
}
else if (ptr_idx == 2) {
ptr = pointers_[2 % Detail::kPointerCount];
}
else if (ptr_idx == 3) {
ptr = pointers_[3 % Detail::kPointerCount];
}
}
int offset = n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess;
ptr[offset] = frag_ptr[n];
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int64_t n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int column_idx = warp_column_ + n * Detail::kLanesInQuad * Policy::kElementsPerAccess;
int ptr_idx = ((column_idx * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount;
AccessType const *smem_ptr = pointers_[ptr_idx];
frag_ptr[n] = smem_ptr[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for int32_t x 16 => int8_t/int4b_t x 16
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
  typename OperatorShape_,              ///< matrix multiply operation shape (concept: gemm::GemmShape)
int OutputSizeBits ///< Size of output element in bits
>
class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, int32_t, 32, OutputSizeBits, 16, 8, true> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = int32_t;
using Layout = layout::RowMajor;
static int const kOutputElementCount = 16;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount = 2;
/// Offsets added
static int const kOffsetCount = 4;
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, 2>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount] = {nullptr};
/// Stride in units of AccessType
int stride_{0};
/// Uniform offset in bytes added to warp tile iterator
int uniform_offset_[Detail::kOffsetCount] = {0};
public:
/// Default constructor
TileIteratorTensorOpMixed() = default;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / AccessType::kElements) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
int column_idx = lane_in_quad ^ (i * 2);
ptr += column_idx;
if (i == 0) {
pointers_[0] = ptr;
}
else if (i == 1) {
pointers_[1] = ptr;
}
}
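    // At construction, the XOR with 0 below is an identity, so the byte offsets are simply
    // i * 4 * sizeof(AccessType), i.e. 0, 32, 64 and 96 bytes for the four entries;
    // add_tile_offset() later recomputes them with (i ^ tile_offset.column()) so the pattern
    // tracks the column tile position.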
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kOffsetCount; ++i) {
uniform_offset_[i] = (i ^ 0) * 4 * sizeof(AccessType);
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / AccessType::kElements;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
int ptr_offset = tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / AccessType::kElements;
pointers_[0] += ptr_offset;
pointers_[1] += ptr_offset;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kOffsetCount; ++i) {
uniform_offset_[i] = (i ^ tile_offset.column()) * 4 * sizeof(AccessType);
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int ptr_idx = (n / 4);
int offset_idx = (n % 4);
AccessType *ptr;
if (ptr_idx == 0) {
ptr = pointers_[0];
}
else if (ptr_idx == 1) {
ptr = pointers_[1];
}
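      // Only the (n / 4) portion of the column index contributes to `offset` here; the (n % 4)
      // portion is carried by uniform_offset_[offset_idx] and added as a byte offset in the
      // inline PTX below.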
int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements;
#if 0
//
// Using inline PTX to avoid generic memory
//
AccessType *smem_ptr = pointers_[ptr_idx];
smem_ptr[offset] = frag_ptr[n];
#else
uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr);
uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n);
uint32_t offset_in_bytes = offset * sizeof(AccessType) + uniform_offset_[offset_idx];
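      // The inline PTX adds the byte offset to the converted shared memory address and issues a
      // single st.shared.v2.u32, writing both 32b words of the AccessType in one vectorized store.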
asm volatile(
"{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n"
: : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1])
);
#endif
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for int32_t x 8 => int8_t/int4b_t x 8
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
int OutputSizeBits ///< Size of output element in bits
>
class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, int32_t, 32, OutputSizeBits, 8, 8, true> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = int32_t;
using Layout = layout::RowMajor;
static int const kOutputElementCount = 8;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount = 2;
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, 2>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount] = {nullptr};
/// Stride in units of AccessType
int stride_{0};
public:
/// Default constructor
TileIteratorTensorOpMixed() = default;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / AccessType::kElements) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
int column_idx = lane_in_quad ^ (i * 2);
ptr += column_idx;
if (i == 0) {
pointers_[0] = ptr;
}
else if (i == 1) {
pointers_[1] = ptr;
}
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / AccessType::kElements;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
int ptr_offset = tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / AccessType::kElements;
pointers_[0] += ptr_offset;
pointers_[1] += ptr_offset;
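    // The constructor swizzles the two pointers with column_idx = lane_in_quad ^ (i * 2), so an
    // odd column-tile advance flips which pointer corresponds to which column half; swapping the
    // pointers below keeps subsequent stores consistent with that swizzle.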
if (tile_offset.column() % 2) {
auto tmp = pointers_[0];
pointers_[0] = pointers_[1];
pointers_[1] = tmp;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int ptr_idx = (n / 4);
AccessType *ptr;
if (ptr_idx == 0) {
ptr = pointers_[0];
}
else if (ptr_idx == 1) {
ptr = pointers_[1];
}
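      // The offset below advances by 4 AccessType elements (8 scalars) for each of the four
      // column iterations within a group, and jumps by 16 elements (with a pointer switch via
      // ptr_idx) every fourth iteration.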
int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements + (n % 4) * 4;
#if 0
//
// Using inline PTX to avoid generic memory
//
AccessType *smem_ptr = pointers_[ptr_idx];
smem_ptr[offset] = frag_ptr[n];
#else
uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr);
uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n);
uint32_t offset_in_bytes = offset * sizeof(AccessType);
asm volatile(
"{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n"
: : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1])
);
#endif
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float x 16 => float_e4m3_t/float_e5m2_t x 16
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
  typename OperatorShape_               ///< matrix multiply operation shape (concept: gemm::GemmShape)
>
class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, float, 32, 8, 16, 8> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = float;
using Layout = layout::RowMajor;
static int const kOutputElementCount = 16;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount = 2;
/// Offsets added
static int const kOffsetCount = 4;
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, 2>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount] = {nullptr};
/// Stride in units of AccessType
int stride_{0};
/// Uniform offset in bytes added to warp tile iterator
int uniform_offset_[Detail::kOffsetCount] = {0};
public:
/// Default constructor
TileIteratorTensorOpMixed() = default;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / AccessType::kElements) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
int column_idx = lane_in_quad ^ (i * 2);
ptr += column_idx;
if (i == 0) {
pointers_[0] = ptr;
}
else if (i == 1) {
pointers_[1] = ptr;
}
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kOffsetCount; ++i) {
uniform_offset_[i] = (i ^ 0) * 4 * sizeof(AccessType);
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / AccessType::kElements;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
int ptr_offset = tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / AccessType::kElements;
pointers_[0] += ptr_offset;
pointers_[1] += ptr_offset;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kOffsetCount; ++i) {
uniform_offset_[i] = (i ^ tile_offset.column()) * 4 * sizeof(AccessType);
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int ptr_idx = (n / 4);
int offset_idx = (n % 4);
AccessType *ptr;
if (ptr_idx == 0) {
ptr = pointers_[0];
}
else if (ptr_idx == 1) {
ptr = pointers_[1];
}
int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements;
#if 0
//
// Using inline PTX to avoid generic memory
//
AccessType *smem_ptr = pointers_[ptr_idx];
smem_ptr[offset] = frag_ptr[n];
#else
uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr);
uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n);
uint32_t offset_in_bytes = offset * sizeof(AccessType) + uniform_offset_[offset_idx];
asm volatile(
"{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n"
: : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1])
);
#endif
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float x 8 => float_e4m3_t/float_e5m2_t x 8
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_ ///< matrix multiply operation shape (concept: gemm::GemmShape)
>
class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, float, 32, 8, 8, 8> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = float;
using Layout = layout::RowMajor;
static int const kOutputElementCount = 8;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount = 2;
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, 2>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount] = {nullptr};
/// Stride in units of AccessType
int stride_{0};
public:
/// Default constructor
TileIteratorTensorOpMixed() = default;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / AccessType::kElements) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
int column_idx = lane_in_quad ^ (i * 2);
ptr += column_idx;
if (i == 0) {
pointers_[0] = ptr;
}
else if (i == 1) {
pointers_[1] = ptr;
}
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / AccessType::kElements;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
int ptr_offset = tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / AccessType::kElements;
pointers_[0] += ptr_offset;
pointers_[1] += ptr_offset;
if (tile_offset.column() % 2) {
auto tmp = pointers_[0];
pointers_[0] = pointers_[1];
pointers_[1] = tmp;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int ptr_idx = (n / 4);
AccessType *ptr;
if (ptr_idx == 0) {
ptr = pointers_[0];
}
else if (ptr_idx == 1) {
ptr = pointers_[1];
}
int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements + (n % 4) * 4;
#if 0
//
// Using inline PTX to avoid generic memory
//
AccessType *smem_ptr = pointers_[ptr_idx];
smem_ptr[offset] = frag_ptr[n];
#else
uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr);
uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n);
uint32_t offset_in_bytes = offset * sizeof(AccessType);
asm volatile(
"{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n"
: : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1])
);
#endif
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#undef CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h/0 | {
"file_path": "include/cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h",
"repo_id": "include",
"token_count": 12302
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for device-level pipelined SYMM and HEMM operators. Batching is not supported; split-K is available only as a serial reduction (SplitKSerial).
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/symm_universal.h"
#include "cutlass/gemm/kernel/default_symm_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassTensorOp,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm80,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = epilogue::thread::LinearCombination<
ElementC_,
128 / sizeof_bits<ElementC_>::value,
ElementAccumulator_,
ElementAccumulator_,
epilogue::thread::ScaleType::OnlyAlphaScaling
>,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by SYMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_ = BlasMode::kSymmetric>
class Symm {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementAKernel = typename platform::conditional<(SideModeA == SideMode::kRight), ElementB_, ElementA_>::type;
using LayoutAKernel = typename platform::conditional<(SideModeA == SideMode::kRight), LayoutB_, LayoutA_>::type;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementBKernel = typename platform::conditional<(SideModeA == SideMode::kRight), ElementA_, ElementB_>::type;
using LayoutBKernel = typename platform::conditional<(SideModeA == SideMode::kRight), LayoutA_, LayoutB_>::type;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static SideMode const kSideModeA = SideModeA;
static FillMode const kFillModeA = FillModeA;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentAKernel = (SideModeA == SideMode::kRight) ? AlignmentB : AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentBKernel = (SideModeA == SideMode::kRight) ? AlignmentA : AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static BlasMode const kBlasMode = BlasMode_;
// static asserts for symm update kernel
  static_assert(platform::is_same<LayoutA, LayoutB>::value,
    "The SYMM update operator requires the same layout for operands A and B");
/// Define the kernel
using SymmKernel = typename kernel::DefaultSymmUniversal<
ElementAKernel,
LayoutAKernel,
kSideModeA,
kFillModeA,
kAlignmentAKernel,
ElementBKernel,
LayoutBKernel,
kAlignmentBKernel,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator,
kBlasMode
>::SymmKernel;
using Arguments = typename SymmKernel::Arguments;
private:
/// Kernel parameters object
typename SymmKernel::Params params_;
public:
/// Constructs the SYMM.
Symm() { }
/// Determines whether the SYMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
Status status = SymmKernel::can_implement(args);
if (SideModeA == SideMode::kInvalid) {
return Status::kErrorInvalidProblem;
}
if (FillModeA != FillMode::kLower && FillModeA != FillMode::kUpper) {
return Status::kErrorInvalidProblem;
}
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial && args.batch_count > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
/// Initializes SYMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial) {
if (args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
}
int gemm_k_size = args.problem_size.k();
    // Swap the A and B arguments if A is on the right side (the problem size does not need to change here).
if (kSideModeA == SideMode::kRight) {
// Initialize the Params structure
params_ = typename SymmKernel::Params{
args.swapped_matrices(),
grid_tiled_shape,
gemm_k_size,
static_cast<int *>(workspace)
};
return Status::kSuccess;
}
// Initialize the Params structure
params_ = typename SymmKernel::Params{
args,
grid_tiled_shape,
gemm_k_size,
static_cast<int *>(workspace)
};
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
params_.update(args, workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(SymmKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename SymmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<SymmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
cutlass::Kernel<SymmKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
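// Usage sketch (illustrative only; `MySymm`, `workspace_ptr`, `stream` and the argument values are
// hypothetical placeholders). The host-side flow mirrors the member functions defined above:
//
//   MySymm symm_op;                                    // MySymm is an instantiation of Symm<...>
//   typename MySymm::Arguments args(/* mode, problem size, batch count, epilogue params,
//                                      operand pointers and leading dimensions */);
//   if (MySymm::can_implement(args) == cutlass::Status::kSuccess) {
//     size_t workspace_bytes = MySymm::get_workspace_size(args);
//     // ... allocate `workspace_bytes` of device memory as workspace_ptr if nonzero ...
//     symm_op.initialize(args, workspace_ptr, stream);
//     symm_op.run(stream);
//   }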
////////////////////////////////////////////////////////////////////////////////
/********************************************************************************************************
  SYMM/HEMM has 4 combinations based on Layout {RowMajor, ColumnMajor} x Side mode {LeftSide, RightSide}.

  In templates and arguments to the cutlass kernel, `matrix A` is always symmetric/hermitian and
  `matrix B` is rectangular (adhering to the cuBLAS convention). Note that cuBLAS SYMM/HEMM only
  supports ColumnMajor layouts for all matrices (A, B, C/D).

  For the mainloop and symm kernel, `A` and `B` point to the left-side and right-side matrices,
  respectively. Thus, in LeftSide mode `A` and `B` point to `matrix A` and `matrix B`, while in
  RightSide mode `A` and `B` point to `matrix B` and `matrix A`.

  Additionally, the CUTLASS GEMM epilogue is always RowMajor; ColumnMajor output is achieved by
  transposing the GEMM problem. Thus, a ColumnMajor output layout for SYMM/HEMM requires:
  - Transposing the `matrix A` and `matrix B` layouts
  - Swapping the problem size m and n values
  - Swapping the LeftSide and RightSide modes

  RowMajor output:    D = matrix A x matrix B
  ColumnMajor output: D = matrix A x matrix B -> Transpose(D) = Transpose(matrix B) x Transpose(matrix A)

  The four {RowMajor, ColumnMajor} x {LeftSide, RightSide} cases are:
  1. LeftSide mode and RowMajor output (default template)
  2. LeftSide mode and ColumnMajor output
  3. RightSide mode and RowMajor output
  4. RightSide mode and ColumnMajor output

  Mapping the ColumnMajor output cases 2 and 4 onto the RowMajor efficient-epilogue implementation:

  Case 2 -> Case 3:
      D_col = matrix A x matrix B                                     (LeftSide mode)
   => Transpose(D_col) = Transpose(matrix B) x Transpose(matrix A)    (RightSide mode)
      Swap the pointers for `A` and `B`, then call the GEMM mainloop with the RowMajor efficient epilogue.

  Case 4 -> Case 1:
      D_col = matrix B x matrix A                                     (RightSide mode)
   => Transpose(D_col) = Transpose(matrix A) x Transpose(matrix B)    (LeftSide mode)
      Call the GEMM mainloop with the RowMajor efficient epilogue.
********************************************************************************************************/
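// Illustrative sketch (not part of the library source; the element and layout choices are
// hypothetical placeholders): a ColumnMajor-output, LeftSide SYMM maps onto the partial
// specialization below, which forwards to a RowMajor, side- and fill-inverted UnderlyingOperator
// with transposed operand layouts, following the Case 2 -> Case 3 mapping above.
//
//   using SymmColumnMajor = cutlass::gemm::device::Symm<
//       double, cutlass::layout::ColumnMajor,                 // matrix A (symmetric) and its layout
//       cutlass::SideMode::kLeft, cutlass::FillMode::kLower,  // side and fill mode of A
//       double, cutlass::layout::ColumnMajor,                 // matrix B
//       double, cutlass::layout::ColumnMajor>;                // C/D layout selecting this specialization
//
//   // Internally, UnderlyingOperator uses layout::RowMajor output with SideMode::kRight and
//   // FillMode::kUpper obtained via InvertSideMode / InvertFillMode.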
/// Partial specialization for column-major output exchanges problem size and operand.
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial,
/// Operation performed by Symm update kernel
typename Operator_,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_
>
class Symm<ElementA_, LayoutA_, SideModeA, FillModeA, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB,
SplitKSerial, Operator_, BlasMode_> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static SideMode const kSideModeA = SideModeA;
static FillMode const kFillModeA = FillModeA;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static BlasMode const kBlasMode = BlasMode_;
/// Define the kernel
using UnderlyingOperator = typename cutlass::gemm::device::Symm<
ElementA,
typename layout::LayoutTranspose<LayoutA>::type,
InvertSideMode<kSideModeA>::mode,
InvertFillMode<kFillModeA>::mode,
ElementB,
typename layout::LayoutTranspose<LayoutB>::type,
ElementC,
layout::RowMajor,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kAlignmentA,
kAlignmentB,
kSplitKSerial,
Operator,
kBlasMode
>;
/// Argument structure
using Arguments = typename UnderlyingOperator::Arguments;
using SymmKernel = typename UnderlyingOperator::SymmKernel;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the Symm.
Symm() { }
  /// Helper to construct a transposed equivalent for the underlying SYMM operator
static Arguments to_underlying_arguments(Arguments const &args) {
return args.transposed_problem_size();
}
/// Determines whether the Symm can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
return UnderlyingOperator::maximum_active_blocks(smem_capacity);
}
/// Initializes Symm state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/symm.h/0 | {
"file_path": "include/cutlass/gemm/device/symm.h",
"repo_id": "include",
"token_count": 7081
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmPlanarComplex {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using Operator = typename Mma::Operator;
using ArchTag = typename Mma::ArchTag;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value);
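  // For example, with 16b ElementA and ElementB this evaluates to max(128/16, 128/16) = 8, so each
  // split-K slice must cover a multiple of 8 elements along the K dimension.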
//
// Additional types needed for reflection
//
using ElementAccumulator = typename Mma::Policy::Operator::ElementC;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::Shape;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
//
// Arguments structure
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue{};
void const * ptr_A_real{nullptr};
void const * ptr_A_imag{nullptr};
void const * ptr_B_real{nullptr};
void const * ptr_B_imag{nullptr};
void const * ptr_C_real{nullptr};
void const * ptr_C_imag{nullptr};
void * ptr_D_real{nullptr};
void * ptr_D_imag{nullptr};
typename LayoutA::Stride::Index lda_real{};
typename LayoutA::Stride::Index lda_imag{};
typename LayoutB::Stride::Index ldb_real{};
typename LayoutB::Stride::Index ldb_imag{};
typename LayoutC::Stride::Index ldc_real{};
typename LayoutC::Stride::Index ldc_imag{};
typename LayoutC::Stride::Index ldd_real{};
typename LayoutC::Stride::Index ldd_imag{};
int64_t batch_stride_A{0};
int64_t batch_stride_A_imag{0};
int64_t batch_stride_B{0};
int64_t batch_stride_B_imag{0};
int64_t batch_stride_C{0};
int64_t batch_stride_C_imag{0};
int64_t batch_stride_D_imag{0};
//
// Methods
//
Arguments() = default;
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A_real,
void const * ptr_A_imag,
void const * ptr_B_real,
void const * ptr_B_imag,
void const * ptr_C_real,
void const * ptr_C_imag,
void * ptr_D_real,
void * ptr_D_imag,
typename LayoutA::Stride::Index lda_real,
typename LayoutA::Stride::Index lda_imag,
typename LayoutB::Stride::Index ldb_real,
typename LayoutB::Stride::Index ldb_imag,
typename LayoutC::Stride::Index ldc_real,
typename LayoutC::Stride::Index ldc_imag,
typename LayoutC::Stride::Index ldd_real,
typename LayoutC::Stride::Index ldd_imag,
int64_t batch_stride_A = 0,
int64_t batch_stride_A_imag = 0,
int64_t batch_stride_B = 0,
int64_t batch_stride_B_imag = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_C_imag = 0,
int64_t batch_stride_D = 0,
int64_t batch_stride_D_imag = 0)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A_real(ptr_A_real),
ptr_A_imag(ptr_A_imag),
ptr_B_real(ptr_B_real),
ptr_B_imag(ptr_B_imag),
ptr_C_real(ptr_C_real),
ptr_C_imag(ptr_C_imag),
ptr_D_real(ptr_D_real),
ptr_D_imag(ptr_D_imag),
lda_real(lda_real),
lda_imag(lda_imag),
ldb_real(ldb_real),
ldb_imag(ldb_imag),
ldc_real(ldc_real),
ldc_imag(ldc_imag),
ldd_real(ldd_real),
ldd_imag(ldd_imag),
batch_stride_A(batch_stride_A),
batch_stride_A_imag(batch_stride_A_imag),
batch_stride_B(batch_stride_B),
batch_stride_B_imag(batch_stride_B_imag),
batch_stride_C(batch_stride_C),
batch_stride_C_imag(batch_stride_C_imag),
batch_stride_D_imag(batch_stride_D_imag)
{}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A_real, args.ptr_B_real);
std::swap(args.ptr_A_imag, args.ptr_B_imag);
std::swap(args.lda_real, args.ldb_real);
std::swap(args.lda_imag, args.ldb_imag);
std::swap(args.batch_stride_A, args.batch_stride_B);
std::swap(args.batch_stride_A_imag, args.batch_stride_B_imag);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A_real{};
typename Mma::IteratorA::Params params_A_imag{};
typename Mma::IteratorB::Params params_B_real{};
typename Mma::IteratorB::Params params_B_imag{};
typename Epilogue::OutputTileIterator::Params params_C_real{};
typename Epilogue::OutputTileIterator::Params params_C_imag{};
typename Epilogue::OutputTileIterator::Params params_D_real{};
typename Epilogue::OutputTileIterator::Params params_D_imag{};
typename EpilogueOutputOp::Params output_op{};
void * ptr_A_real{nullptr};
void * ptr_A_imag{nullptr};
void * ptr_B_real{nullptr};
void * ptr_B_imag{nullptr};
void * ptr_C_real{nullptr};
void * ptr_C_imag{nullptr};
void * ptr_D_real{nullptr};
void * ptr_D_imag{nullptr};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_A_imag{0};
int64_t batch_stride_B_imag{0};
int64_t batch_stride_C_imag{0};
int64_t batch_stride_D_imag{0};
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A_real(args.lda_real),
params_A_imag(args.lda_imag),
params_B_real(args.ldb_real),
params_B_imag(args.ldb_imag),
params_C_real(args.ldc_real),
params_C_imag(args.ldc_imag),
params_D_real(args.ldd_real),
params_D_imag(args.ldd_imag),
output_op(args.epilogue),
ptr_A_real(const_cast<void *>(args.ptr_A_real)),
ptr_A_imag(const_cast<void *>(args.ptr_A_imag)),
ptr_B_real(const_cast<void *>(args.ptr_B_real)),
ptr_B_imag(const_cast<void *>(args.ptr_B_imag)),
ptr_C_real(const_cast<void *>(args.ptr_C_real)),
ptr_C_imag(const_cast<void *>(args.ptr_C_imag)),
ptr_D_real(args.ptr_D_real),
ptr_D_imag(args.ptr_D_imag),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_A_imag(args.batch_stride_A_imag),
batch_stride_B_imag(args.batch_stride_B_imag),
batch_stride_C_imag(args.batch_stride_C_imag),
batch_stride_D_imag(args.batch_stride_D_imag)
{}
/// Returns the workspace size (in bytes) needed for this problem geometry
size_t get_workspace_size() const
{
size_t workspace_bytes = ParamsBase::get_workspace_size();
if (this->mode == GemmUniversalMode::kGemmSplitKParallel)
{
      // Double the size returned by the base class: the split-K workspace must hold
      // two ElementC accumulator planes, one real and one imaginary.
workspace_bytes *= 2;
}
return workspace_bytes;
}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
ptr_A_real = const_cast<void *>(args.ptr_A_real);
ptr_A_imag = const_cast<void *>(args.ptr_A_imag);
ptr_B_real = const_cast<void *>(args.ptr_B_real);
ptr_B_imag = const_cast<void *>(args.ptr_B_imag);
ptr_C_real = const_cast<void *>(args.ptr_C_real);
ptr_C_imag = const_cast<void *>(args.ptr_C_imag);
ptr_D_real = const_cast<void *>(args.ptr_D_real);
ptr_D_imag = const_cast<void *>(args.ptr_D_imag);
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
this->batch_stride_D = args.batch_stride_D;
batch_stride_A_imag = args.batch_stride_A_imag;
batch_stride_B_imag = args.batch_stride_B_imag;
batch_stride_C_imag = args.batch_stride_C_imag;
batch_stride_D_imag = args.batch_stride_D_imag;
output_op = args.epilogue;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(Arguments const &args)
{
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
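    // The contiguous dimension of each operand depends on its layout; it must be
    // divisible by that operand's vector access width for the iterators' aligned
    // vector accesses to be valid.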
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = args.problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = args.problem_size.m() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = args.problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = args.problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = args.problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = args.problem_size.m() % kAlignmentC;
}
if (isAMisaligned || isBMisaligned || isCMisaligned) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmPlanarComplex op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A_real = static_cast<ElementA *>(params.ptr_A_real);
ElementA *ptr_A_imag = static_cast<ElementA *>(params.ptr_A_imag);
ElementB *ptr_B_real = static_cast<ElementB *>(params.ptr_B_real);
ElementB *ptr_B_imag = static_cast<ElementB *>(params.ptr_B_imag);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
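      // Each threadblock along the grid's K dimension covers a contiguous slice of
      // gemm_k_size elements of K starting at offset_k; the final slice simply keeps
      // the full problem_size.k() as its upper bound.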
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_A;
ptr_A_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_A_imag;
ptr_B_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_B;
ptr_B_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_B_imag;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A_real = static_cast<ElementA * const *>(params.ptr_A_real)[threadblock_tile_offset.k()];
ptr_A_imag = static_cast<ElementA * const *>(params.ptr_A_imag)[threadblock_tile_offset.k()];
ptr_B_real = static_cast<ElementB * const *>(params.ptr_B_real)[threadblock_tile_offset.k()];
ptr_B_imag = static_cast<ElementB * const *>(params.ptr_B_imag)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A_real(
params.params_A_real,
ptr_A_real,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorA iterator_A_imag(
params.params_A_imag,
ptr_A_imag,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B_real(
params.params_B_real,
ptr_B_real,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorB iterator_B_imag(
params.params_B_imag,
ptr_B_imag,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
    // Compute the number of mainloop iterations over this threadblock's K slice
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A_real,
iterator_A_imag,
iterator_B_real,
iterator_B_imag,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // Assume an identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C_real = static_cast<ElementC *>(params.ptr_C_real);
ElementC *ptr_C_imag = static_cast<ElementC *>(params.ptr_C_imag);
ElementC *ptr_D_real = static_cast<ElementC *>(params.ptr_D_real);
ElementC *ptr_D_imag = static_cast<ElementC *>(params.ptr_D_imag);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D_real += threadblock_tile_offset.k() * params.batch_stride_D;
ptr_D_imag += threadblock_tile_offset.k() * params.batch_stride_D_imag;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_C;
ptr_C_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_C_imag;
ptr_D_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_D;
ptr_D_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_D_imag;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C_real = static_cast<ElementC * const *>(params.ptr_C_real)[threadblock_tile_offset.k()];
ptr_C_imag = static_cast<ElementC * const *>(params.ptr_C_imag)[threadblock_tile_offset.k()];
ptr_D_real = static_cast<ElementC * const *>(params.ptr_D_real)[threadblock_tile_offset.k()];
ptr_D_imag = static_cast<ElementC * const *>(params.ptr_D_imag)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C_real(
params.params_C_real,
ptr_C_real,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_C_imag(
params.params_C_imag,
ptr_C_imag,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D_real(
params.params_D_real,
ptr_D_real,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_D_imag(
params.params_D_imag,
ptr_D_imag,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
//
// Construct epilogue
//
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C_real = iterator_D_real;
iterator_C_imag = iterator_D_imag;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D_real,
iterator_D_imag,
accumulators,
iterator_C_real,
iterator_C_imag);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_planar_complex.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_planar_complex.h",
"repo_id": "include",
"token_count": 9363
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base functionality for common types of sparse GEMM kernel parameters
*/
#pragma once
#include "cutlass/cutlass.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure
template <
typename ThreadblockSwizzle,
typename ParamsA,
typename TensorRefA,
typename ParamsB,
typename TensorRefB,
typename ParamsE,
typename TensorRefE>
struct SparseParamsBase
{
//
// Data members
//
cutlass::gemm::GemmCoord problem_size{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
  int swizzle_log_tile{0};
ParamsA params_A{};
TensorRefA ref_A{};
ParamsB params_B{};
TensorRefB ref_B{};
ParamsE params_E{};
TensorRefE ref_E{};
int gemm_k_iterations{0};
int gemm_k_size{0};
//
// Host dispatch API
//
/// Default constructor
SparseParamsBase() = default;
/// Constructor
CUTLASS_HOST_DEVICE
SparseParamsBase(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
TensorRefA ref_A,
TensorRefB ref_B,
TensorRefE ref_E,
int const mma_shape_k)
:
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_E(ref_E.layout()),
ref_E(ref_E)
{
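    // Partition the K dimension across grid_tiled_shape.k(): round the total number of
    // mma-shaped K iterations up, divide them among the partitions, and convert back to
    // elements so that each partition covers gemm_k_size elements of K.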
int total_gemm_k_iterations = (problem_size.k() + mma_shape_k - 1) / mma_shape_k;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * mma_shape_k;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/params_sparse_base.h/0 | {
"file_path": "include/cutlass/gemm/kernel/params_sparse_base.h",
"repo_id": "include",
"token_count": 1197
} | 26 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/gemm/kernel/static_tile_scheduler.hpp"
namespace cutlass::gemm::kernel::detail {
///////////////////////////////////////////////////////////////////////////////
// Persistent Thread Block (TB) scheduler
class PersistentTileSchedulerSm90:
public StaticPersistentTileScheduler<PersistentTileSchedulerSm90> {
using BaseScheduler = StaticPersistentTileScheduler<PersistentTileSchedulerSm90>;
public:
using StaticPersistentTileScheduler::StaticPersistentTileScheduler;
using Params = PersistentTileSchedulerSm90Params;
using RasterOrder = typename Params::RasterOrder;
using RasterOrderOptions = typename Params::RasterOrderOptions;
using Arguments = BaseScheduler::Arguments;
// get work_idx_m, work_idx_n from blk_per_grid_dim while applying swizzle
static CUTLASS_DEVICE
cute::tuple<int32_t, int32_t>
get_work_idx_m_and_n(
uint64_t blk_per_grid_dim,
FastDivmodU64Pow2 const& divmod_cluster_shape_major,
FastDivmodU64Pow2 const& divmod_cluster_shape_minor,
FastDivmodU64 const& divmod_cluster_blk_major,
int32_t log_swizzle_size,
RasterOrder raster_order) {
auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster();
return get_work_idx_m_and_n(
blk_per_grid_dim,
divmod_cluster_shape_major,
divmod_cluster_shape_minor,
divmod_cluster_blk_major,
log_swizzle_size,
raster_order,
cta_m_in_cluster,
cta_n_in_cluster
);
}
static CUTLASS_DEVICE
cute::tuple<int32_t, int32_t>
get_work_idx_m_and_n(
uint64_t blk_per_grid_dim,
FastDivmodU64Pow2 const& divmod_cluster_shape_major,
FastDivmodU64Pow2 const& divmod_cluster_shape_minor,
FastDivmodU64 const& divmod_cluster_blk_major,
int32_t log_swizzle_size,
RasterOrder raster_order,
uint64_t cta_m_in_cluster,
uint64_t cta_n_in_cluster) {
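    // Decompose the linearized CTA index into a cluster index and an offset within the
    // cluster along the rasterization-major dimension, undo the swizzle applied to the
    // cluster index, then rebuild per-CTA (m, n) tile coordinates from the cluster
    // coordinates plus this CTA's position within its cluster.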
uint64_t cluster_id, cluster_major_offset = 0, cluster_minor_offset = 0;
divmod_cluster_shape_major(cluster_id, cluster_major_offset, blk_per_grid_dim);
if (raster_order == RasterOrder::AlongN) {
cluster_minor_offset = cta_m_in_cluster;
}
else {
cluster_minor_offset = cta_n_in_cluster;
}
uint64_t cluster_idx_minor, cluster_idx_major;
uint64_t cluster_idx_minor_div_swizzle, extra, offset;
offset = cluster_id & ((1 << log_swizzle_size) - 1);
extra = cluster_id >> log_swizzle_size;
divmod_cluster_blk_major(cluster_idx_minor_div_swizzle, cluster_idx_major, extra);
cluster_idx_minor = cluster_idx_minor_div_swizzle * (1 << log_swizzle_size) + offset;
auto minor_work_idx = static_cast<int32_t>(cluster_idx_minor * divmod_cluster_shape_minor.divisor +
cluster_minor_offset);
auto major_work_idx = static_cast<int32_t>(cluster_idx_major * divmod_cluster_shape_major.divisor +
cluster_major_offset);
if (raster_order == RasterOrder::AlongN) {
return {minor_work_idx, major_work_idx};
}
else {
return {major_work_idx, minor_work_idx};
}
}
// The basic tile scheduler does not require any additional workspace
template <class ProblemShape, class ElementAccumulator>
static size_t
get_workspace_size(Arguments const&, ProblemShape, KernelHardwareInfo const&, uint32_t, const uint32_t = 1) {
return 0;
}
template <class ProblemShape, class ElementAccumulator>
static cutlass::Status
initialize_workspace(Arguments const&, void*, cudaStream_t, ProblemShape, KernelHardwareInfo const&,
uint32_t, const uint32_t = 1) {
return Status::kSuccess;
}
};
}
| include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp",
"repo_id": "include",
"token_count": 2012
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines basic properties needed by CTA-level batched GEMV, with expectations about the
      data layout of the global memory fragments, data types, and internal tile sizes.
      Partial specializations for threadblock::Mma operations targeting SIMT instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/gemm/threadblock/gemv.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/// Template defining default vector-matrix multiply operators inferred from the threadblock
/// tile size and global memory data layout.
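///
/// One illustrative instantiation (parameter values chosen for exposition only, not
/// taken from a particular kernel) might look like:
///
///   using Core = cutlass::gemm::threadblock::DefaultGemvCore<
///       cutlass::gemm::GemmShape<1, 64, 4>,             // threadblock shape (M must be 1)
///       cutlass::gemm::GemmShape<1, 4, 4>,              // per-thread shape
///       cutlass::half_t, cutlass::layout::RowMajor,     // A
///       cutlass::half_t, cutlass::layout::ColumnMajor,  // B
///       float, cutlass::layout::RowMajor>;              // C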
template <
typename Shape_, /// Shape of the threadblock vector-matrix multiply operator
typename ThreadShape_, /// Shape of per-thread vector-matrix multiply operator
typename ElementA_, /// Element data type of A operand
typename LayoutA_, /// Layout of operand A
typename ElementB_, /// Element data type of B operand
typename LayoutB_, /// Layout of operand B
typename ElementC_, /// Data type of accumulator
typename LayoutC_ /// Layout of accumulator
>
struct DefaultGemvCore {
using Shape = Shape_;
using ThreadShape = ThreadShape_;
using LayoutA = LayoutA_;
using LayoutB = LayoutB_;
using LayoutC = LayoutC_;
using ElementA = ElementA_;
using ElementB = ElementB_;
using ElementC = ElementC_;
static int const kThreadsPerN = Shape::kN / ThreadShape::kN;
using IteratorPolicyA = typename platform::conditional<
platform::is_same<LayoutA, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<Shape::kK, Shape::kM>, 1, ThreadShape::kK>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<Shape::kM, Shape::kK>, 1, ThreadShape::kM>>::type;
using IteratorA = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<Shape::kM, Shape::kK>, ElementA, LayoutA, 1, IteratorPolicyA>;
using IteratorPolicyB = typename platform::conditional<
platform::is_same<LayoutB, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreadsPerN, ThreadShape::kN>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreadsPerN, ThreadShape::kK>>::type;
using IteratorB = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<Shape::kK, Shape::kN>, ElementB, LayoutB, 0, IteratorPolicyB>;
using IteratorPolicyC = typename platform::conditional<
platform::is_same<LayoutC, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<Shape::kN, Shape::kM>, kThreadsPerN, ThreadShape::kN>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<Shape::kM, Shape::kN>, kThreadsPerN, ThreadShape::kM>>::type;
using IteratorC = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC, 0, IteratorPolicyC>;
using MmaSimtOp = typename cutlass::gemm::thread::Mma<
cutlass::gemm::GemmShape<ThreadShape::kM, ThreadShape::kN, Shape::kK>,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC>;
using Operator = MmaSimtOp;
// Assertions for correctness
static_assert((Shape::kM == 1), "M=1 is required for GEMV");
static_assert((ThreadShape::kM == 1), "M=1 is required for GEMV");
static_assert(Shape::kK % ThreadShape::kK == 0, "Shape::K must be a multiple of ThreadShape::K");
static_assert(((ThreadShape::kK == 1) ||
(ThreadShape::kK == 2) ||
(ThreadShape::kK == 4) ||
(ThreadShape::kK == 8) ||
(ThreadShape::kK == 16) ||
(ThreadShape::kK == 32)
),
"ThreadShape::K must be a 1, 2, 4, 8, 16 or 32");
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/default_gemv_core.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_gemv_core.h",
"repo_id": "include",
"token_count": 2552
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a multistage threadblock-scoped planar complex GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/functional.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_planar_complex_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute a planar complex matrix product using a multistage software
/// pipeline of cp.async global->shared copies overlapped with warp-level MMA operations.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Transformation applied to A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Transformation applied to B
ComplexTransform TransformB = ComplexTransform::kNone
>
class MmaPlanarComplexMultistage :
public MmaPlanarComplexBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaPlanarComplexBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
  ///< Architecture tag
using ArchTag = arch::Sm80;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Transformation applied to A
static ComplexTransform const kTransformA = TransformA;
/// Transformation applied to B
static ComplexTransform const kTransformB = TransformB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = ArrayPlanarComplex<
typename Policy::Operator::FragmentC::Element,
Policy::Operator::FragmentC::kElements
>;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const TBLoadIterationsA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const TBLoadIterationsB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
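    /// Number of cp.async accesses issued per warp-level MMA iteration, so that the
    /// global->shared copies for the next stage are spread evenly (rounded up) across
    /// the kWarpGemmIterations of the mainloop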
static int const kAccessesPerGroupA =
(TBLoadIterationsA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
static int const kAccessesPerGroupB =
(TBLoadIterationsB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
};
private:
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaPlanarComplexMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
private:
CUTLASS_DEVICE
void copy_tiles_and_advance(
IteratorA &iterator_A_real,
IteratorA &iterator_A_imag,
IteratorB &iterator_B_real,
IteratorB &iterator_B_imag,
int group_start_A = 0,
int group_start_B = 0) {
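    // The real and imaginary fragments share one shared-memory allocation, offset by
    // kImaginaryStrideA/B elements, so each predicated cp.async below is issued twice:
    // once for the real part and once for the imaginary part.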
iterator_A_real.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
iterator_A_imag.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Load for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr_real = iterator_A_real.get();
auto gmem_ptr_imag = iterator_A_imag.get();
bool pred_guard = iterator_A_real.valid();
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v,
gmem_ptr_real,
pred_guard);
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v + (Base::SharedStorage::kImaginaryStrideA / IteratorA::ThreadMap::kElementsPerAccess),
reinterpret_cast<char const *>(gmem_ptr_imag),
pred_guard);
++iterator_A_real;
++iterator_A_imag;
}
++this->smem_iterator_A_;
}
iterator_B_real.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector);
iterator_B_imag.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(this->smem_iterator_B_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr_real = iterator_B_real.get();
auto gmem_ptr_imag = iterator_B_imag.get();
bool pred_guard = iterator_B_real.valid();
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v,
gmem_ptr_real,
pred_guard);
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v + (Base::SharedStorage::kImaginaryStrideB / IteratorB::ThreadMap::kElementsPerAccess),
reinterpret_cast<char const *>(gmem_ptr_imag),
pred_guard);
++iterator_B_real;
++iterator_B_imag;
}
++this->smem_iterator_B_;
}
}
CUTLASS_DEVICE
void warp_mma_planar_complex(
Operator & warp_mma,
FragmentC &accum,
WarpFragmentA const & real_A,
WarpFragmentA const & imag_A,
WarpFragmentB const & real_B,
WarpFragmentB const & imag_B) {
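    // Planar complex multiply-accumulate expanded into four real-valued warp MMAs:
    //   (a_r + i a_i)(b_r + i b_i) = (a_r b_r - a_i b_i) + i (a_r b_i + a_i b_r)
    // Conjugating A and/or B flips the sign of the corresponding imaginary term; those
    // sign flips are folded in by substituting the pre-negated copies of the B fragments.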
cutlass::negate<Array<typename WarpFragmentB::Element, WarpFragmentB::kElements>> neg_op_B;
WarpFragmentB neg_real_B = neg_op_B(real_B);
WarpFragmentB neg_imag_B = neg_op_B(imag_B);
warp_mma(accum.real, real_A, real_B, accum.real);
if (kTransformB == ComplexTransform::kNone) {
warp_mma(accum.imag, real_A, imag_B, accum.imag);
}
else {
warp_mma(accum.imag, real_A, neg_imag_B, accum.imag);
}
if (kTransformA == ComplexTransform::kNone) {
warp_mma(accum.imag, imag_A, real_B, accum.imag);
}
else {
warp_mma(accum.imag, imag_A, neg_real_B, accum.imag);
}
    if ((kTransformA == ComplexTransform::kNone) ^ (kTransformB == ComplexTransform::kNone)) {
warp_mma(accum.real, imag_A, imag_B, accum.real);
}
else {
warp_mma(accum.real, imag_A, neg_imag_B, accum.real);
}
}
public:
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A_real,
///< iterator over A operand in global memory
IteratorA iterator_A_imag,
///< iterator over B operand in global memory
IteratorB iterator_B_real,
///< iterator over B operand in global memory
IteratorB iterator_B_imag,
///< initial value of accumulator
FragmentC const &src_accum) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A_real.clear_mask(gemm_k_iterations == 0);
iterator_A_imag.clear_mask(gemm_k_iterations == 0);
iterator_B_real.clear_mask(gemm_k_iterations == 0);
iterator_B_imag.clear_mask(gemm_k_iterations == 0);
iterator_A_real.set_iteration_index(0);
iterator_A_imag.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Load for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8;
bool pred_guard = iterator_A_real.valid();
auto src_ptr_real = iterator_A_real.get();
auto src_ptr_imag = iterator_A_imag.get();
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, src_ptr_real, pred_guard);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v +
Base::SharedStorage::kImaginaryStrideA /
IteratorA::ThreadMap::kElementsPerAccess,
reinterpret_cast<char const *>(src_ptr_imag),
pred_guard);
++iterator_A_real;
++iterator_A_imag;
}
++this->smem_iterator_A_;
}
iterator_B_real.set_iteration_index(0);
iterator_B_imag.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8;
bool pred_guard = iterator_B_real.valid();
auto src_ptr_real = iterator_B_real.get();
auto src_ptr_imag = iterator_B_imag.get();
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, src_ptr_real, pred_guard);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v +
Base::SharedStorage::kImaginaryStrideB /
IteratorB::ThreadMap::kElementsPerAccess,
reinterpret_cast<char const *>(src_ptr_imag),
pred_guard);
++iterator_B_real;
++iterator_B_imag;
}
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A_real.add_tile_offset({0, 1});
iterator_A_imag.add_tile_offset({0, 1});
iterator_B_real.add_tile_offset({1, 0});
iterator_B_imag.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Inserts a memory fence between stages of cp.async instructions
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Blocks until all but kStages-2 cp.async stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpFragmentA warp_frag_real_A[2];
WarpFragmentA warp_frag_imag_A[2];
WarpFragmentB warp_frag_real_B[2];
WarpFragmentB warp_frag_imag_B[2];
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_real_A[0]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[0], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[0]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[0], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
iterator_A_real.clear_mask(gemm_k_iterations == 0);
iterator_A_imag.clear_mask(gemm_k_iterations == 0);
iterator_B_real.clear_mask(gemm_k_iterations == 0);
iterator_B_imag.clear_mask(gemm_k_iterations == 0);
// Start issuing the first group of the next stage outside of the mainloop
copy_tiles_and_advance(iterator_A_real, iterator_A_imag, iterator_B_real, iterator_B_imag);
Operator warp_mma;
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
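    // Shared memory is treated as a circular buffer of kStages stages: the prologue has
    // already filled stages [0, kStages-1), so writes continue at the last stage while
    // warp-level reads begin at stage 0.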
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_real_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
}
else {
group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB;
}
copy_tiles_and_advance(
iterator_A_real,
iterator_A_imag,
iterator_B_real,
iterator_B_imag,
group_start_iteration_A,
group_start_iteration_B);
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Inserts a memory fence between stages of cp.async instructions
cutlass::arch::cp_async_fence();
// Blocks until all but kStages-2 cp.async stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A_real.add_tile_offset({0, 1});
iterator_A_imag.add_tile_offset({0, 1});
iterator_B_real.add_tile_offset({1, 0});
iterator_B_imag.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
iterator_A_real.clear_mask(gemm_k_iterations == 0);
iterator_A_imag.clear_mask(gemm_k_iterations == 0);
iterator_B_real.clear_mask(gemm_k_iterations == 0);
iterator_B_imag.clear_mask(gemm_k_iterations == 0);
}
warp_mma_planar_complex(
warp_mma,
accum,
warp_frag_real_A[warp_mma_k % 2],
warp_frag_imag_A[warp_mma_k % 2],
warp_frag_real_B[warp_mma_k % 2],
warp_frag_imag_B[warp_mma_k % 2]);
}
}
    // Commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_planar_complex_multistage.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_planar_complex_multistage.h",
"repo_id": "include",
"token_count": 9412
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
This is a work in progress.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting Volta Tensor Core (mma.sync) operations.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Used for partial specialization
typename Enable = bool
>
class MmaVoltaTensorOp {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Architecture tag
using ArchTag = arch::Sm70;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Underlying instruction shape
using InstructionShape = typename ArchMmaOperator::Shape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// interleaved 32x32 tiles
using InterleavedTileShape = GemmShape<32, 32, 4>;
static_assert(!(Shape::kM % InterleavedTileShape::kM) &&
!(Shape::kN % InterleavedTileShape::kN),
"Shape must be a multiple of InterleavedTileShape.");
public:
/// Iterates over the A operand in memory
using IteratorA = MmaVoltaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<
ArchMmaOperator::Shape::kM,
ArchMmaOperator::Shape::kK
>,
Policy::OpDelta::kRow,
kThreadCount
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Iterates over the B operand in memory
using IteratorB = MmaVoltaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<
ArchMmaOperator::Shape::kK,
ArchMmaOperator::Shape::kN
>,
Policy::OpDelta::kRow,
kThreadCount
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Iterates over the C operand in memory
using IteratorC = MmaVoltaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta
>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
private:
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
InterleavedTileShape::kM / ArchMmaOperator::Shape::kM,
InterleavedTileShape::kN / ArchMmaOperator::Shape::kN
>;
using TileIterations = MatrixShape<
Shape::kM / InterleavedTileShape::kM,
Shape::kN / InterleavedTileShape::kN
>;
// Whether matrix B is reordered
bool reorder_B_;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaVoltaTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) {
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
CUTLASS_PRAGMA_UNROLL
for (int outer_col = 0; outer_col < TileIterations::kColumn; ++outer_col) {
CUTLASS_PRAGMA_UNROLL
for (int inner_col = 0; inner_col < MmaIterations::kColumn; ++inner_col) {
CUTLASS_PRAGMA_UNROLL
for (int outer_row = 0; outer_row < TileIterations::kRow; ++outer_row) {
CUTLASS_PRAGMA_UNROLL
for (int inner_row = 0; inner_row < MmaIterations::kRow; ++inner_row) {
int op_col = inner_col + MmaIterations::kColumn * outer_col;
// Column-major serpentine sequence to maximize reuse of A operand.
int inner_row_serp = inner_row;
int outer_row_serp = outer_row;
if (op_col & 1) {
inner_row_serp = MmaIterations::kRow - inner_row - 1;
outer_row_serp = TileIterations::kRow - outer_row - 1;
}
int op_row = inner_row_serp + MmaIterations::kRow * outer_row_serp;
int op_idx = inner_row_serp + MmaIterations::kRow *
(inner_col + MmaIterations::kColumn *
(outer_row_serp + TileIterations::kRow * outer_col));
mma(
ptr_D[op_idx],
ptr_A[op_row],
ptr_B[op_col],
ptr_D[op_idx]);
}
}
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/warp/mma_tensor_op_sm70.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_sm70.h",
"repo_id": "include",
"token_count": 3138
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/thread/reduction_operators.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
/// Parameters structure
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
struct TensorReductionAffineStridedParams {
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
Coord<kRank> extent; /// Extent of source tensor
FastDivmodU64 divmod[kRank - 1]; /// FastDivmod by each strided rank
int64_t dst_stride[kReducedRank - 1]; /// stride (units of bytes) - I, J
int64_t src_stride[kRank - 1]; /// stride (units of bytes) - I, J, K
int64_t workspace_stride; /// stride (units of bytes) between workspace
int64_t workspace_outer_stride; /// stride (units of bytes) between 'rows' of the workspace
int workspace_count; /// number of workspaces
uint64_t inner_count; /// Number of elements in reduced index space
uint64_t outer_count; /// Number of elements in outer index space
ElementOutput * destination; /// Pointer to output tensor of rank kReducedRank
ElementSource const * source; /// Pointer to source pointer of rank kRank
ReductionOp reduction_op; /// Reduction operator
ElementCompute reduction_identity; /// Identity element for reduction operator
ElementCompute *device_workspace; /// Pointer to device workspace for inter-CTA reductions
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorReductionAffineStridedParams() {
}
/// Ctor
TensorReductionAffineStridedParams(
Coord<kRank> extent_, ///< Extent of source tensor
ElementOutput * dst_ptr_, ///< Output tensor data
int64_t dst_stride_[], ///< Stride (units of elements)
ElementSource const * src_ptr_, ///< Source tensor data
int64_t src_stride_[], ///< Stride (units of elements)
ElementCompute *device_workspace_, ///< Pointer to device workspace for inter-CTA reductions
int64_t workspace_stride_, ///< Stride between workspaces
int workspace_count_, ///< Number of workspaces
ReductionOp reduction_op_, ///< Reduction operator
ElementCompute reduction_identity_ = ElementCompute() ///< Identity element for reduction operator
):
extent(extent_),
inner_count(1),
outer_count(1),
destination(dst_ptr_),
source(src_ptr_),
device_workspace(device_workspace_),
workspace_outer_stride(0),
workspace_stride(workspace_stride_),
workspace_count(workspace_count_),
reduction_op(reduction_op_),
reduction_identity(reduction_identity_) {
// Initialize divisors for fast div-mod
for (int p = 1; p < kRank; ++p) {
divmod[p - 1] = FastDivmodU64(uint64_t(extent[p]));
}
int input_size_bits = sizeof_bits<ElementSource>::value;
int output_size_bits = sizeof_bits<ElementOutput>::value;
workspace_outer_stride = workspace_stride * workspace_count;
// Compute strides in units of bytes
for (int p = 0; p < kReducedRank - 1; ++p) {
dst_stride[p] = dst_stride_[p] * output_size_bits / 8;
}
for (int p = 0; p < kRank - 1; ++p) {
src_stride[p] = src_stride_[p] * input_size_bits / 8;
}
// Compute number of elements in strided ranks
for (int p = 0; p < kReducedRank - 1; ++p) {
outer_count *= uint64_t(extent[p]);
}
for (int p = 0; p < kInnerRank; ++p) {
inner_count *= uint64_t(extent[kReducedRank + p - 1]);
}
}
};
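/// Worked example (illustrative, not part of the original header): reducing an
/// NDHWC tensor (kRank = 5) to NC (kReducedRank = 2) gives kInnerRank = 3, so
/// outer_count = N and inner_count = D * H * W, while the contiguous rank C is
/// covered by vectorized accesses and is counted in neither product.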
/// Kernel to reduce a tensor with affine layout over a set of ranks *EXCLUDING* the contiguous
/// rank. This leads to favorable vectorized memory accesses over the contiguous rank.
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineStrided {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ComputeFragment = Array<ElementCompute, VectorLength>;
using SourceFragment = AlignedArray<ElementSource, VectorLength>;
using OutputFragment = AlignedArray<ElementOutput, VectorLength>;
/// Shared memory allocation used for reduction within the CTA
struct SharedStorage {
Array<ElementCompute, kThreads * kVectorLength> workspace;
};
/// Parameters structure
using Params = TensorReductionAffineStridedParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_inner_coord_and_offset_(
Params const ¶ms,
Coord<kInnerRank> & coord,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose into coordinate
coord = CoordinateDecomposition<kInnerRank>(linear_idx, ¶ms.divmod[kReducedRank - 1]);
// Compute linear offset
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kInnerRank; ++i) {
src_offset += params.src_stride[kReducedRank + i - 1] * coord[i];
}
}
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank - 1> & coord,
int64_t &dst_offset,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose linear coordinate
coord = CoordinateDecomposition<kReducedRank - 1>(linear_idx, params.divmod);
// Compute offset into tensors
dst_offset = 0;
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank - 1; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
src_offset += params.src_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices
CUTLASS_DEVICE
ComputeFragment reduce_indices_(
Params const ¶ms,
ElementCompute *threadblock_workspace,
char const *src_byte_ptr) {
NumericArrayConverter<ElementCompute, ElementSource, VectorLength> convert_source;
ReductionOp reduction_op(params.reduction_op);
// Accumulated output
ComputeFragment identity_frag;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(identity_frag.size()); ++i) {
identity_frag[i] = params.reduction_identity;
}
if (!params.inner_count) {
return identity_frag;
}
ComputeFragment accumulator = identity_frag;
// Compute the coordinate of the first access
int64_t src_byte_offset = 0;
Coord<kInnerRank> coord;
uint64_t linear_idx = threadIdx.z + blockIdx.z * blockDim.z;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
    // Fragments used to stage a batch of loads from the source tensor
SourceFragment source_fragment[kBatchSize];
bool not_done = true;
// Iterate over vectors in a linearized reduction index space
while (not_done) {
bool guards[kBatchSize];
// Issue a batch of loads
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (linear_idx < params.inner_count) {
source_fragment[b] = *reinterpret_cast<SourceFragment const *>(src_byte_ptr + src_byte_offset);
guards[b] = true;
}
else {
guards[b] = false;
not_done = false;
}
linear_idx += blockDim.z * gridDim.z;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
}
// Perform a batch of reduction operations
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (guards[b]) {
auto cvt = convert_source(source_fragment[b]);
accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator(
reduction_op,
accumulator,
cvt);
}
}
    }
// Optional reduction within a CTA
if (blockDim.z > 1) {
// Linearized thread ID
int thread_idx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
// all threads store to workspace
ComputeFragment *frag_ptr = reinterpret_cast<ComputeFragment *>(threadblock_workspace);
frag_ptr[thread_idx] = accumulator;
__syncthreads();
if (threadIdx.z == 0) {
// Load all additional block indices
for (int z = 1; z < blockDim.z; ++z) {
ComputeFragment frag = frag_ptr[thread_idx + z * blockDim.x * blockDim.y];
accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator(
reduction_op,
accumulator,
frag);
}
}
__syncthreads();
}
return accumulator;
}
public:
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength;
char const * src_byte_ptr = reinterpret_cast<char const *>(params.source + coord_c);
char * dst_byte_ptr = nullptr;
// If performing a reduction across CTAs, redirect output to device workspace
if (gridDim.z == 1) {
dst_byte_ptr = reinterpret_cast<char *>(params.destination + coord_c);
}
else {
dst_byte_ptr = reinterpret_cast<char *>(params.device_workspace + coord_c);
}
// If the C index is out of bounds, exit
if (coord_c >= params.extent[kRank - 1]) {
return;
}
int64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y;
// Use modulo division to compute location
Coord<kReducedRank - 1> outer_coord;
int64_t dst_byte_offset;
int64_t src_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
if (gridDim.z == 1) {
/// Complete the reduction with no workspace
while (idx_linear < params.outer_count) {
ComputeFragment result;
result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset);
// Store the result after possible final reduction within the CTA
if (threadIdx.z == 0) {
// Convert to output type and store
NumericArrayConverter<ElementOutput, ElementCompute, VectorLength> convert_output;
auto cvt = convert_output(result);
*reinterpret_cast<OutputFragment *>(dst_byte_ptr + dst_byte_offset) =
reinterpret_cast<OutputFragment const &>(cvt);
}
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while
}
else {
/// Complete the reduction with a device workspace
while (idx_linear < params.outer_count) {
ComputeFragment result;
result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset);
// Store the result after possible final reduction within the CTA
if (threadIdx.z == 0) {
int64_t byte_offset =
blockIdx.z * params.workspace_stride + idx_linear * params.workspace_outer_stride;
// No conversion - store in compute type
*reinterpret_cast<ComputeFragment *>(dst_byte_ptr + byte_offset) =
reinterpret_cast<ComputeFragment const &>(result);
}
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while (outer index)
} // if ()
}
};
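/// Launch-mapping summary (descriptive note): threadIdx.x/blockIdx.x walk the
/// contiguous rank in units of kVectorLength elements, threadIdx.y/blockIdx.y walk
/// the preserved (outer) strided ranks, and threadIdx.z/blockIdx.z walk the reduced
/// (inner) ranks. When gridDim.z > 1, each z-block writes a partial result in
/// ElementCompute to params.device_workspace, and TensorReductionAffineStridedFinal
/// (below) must then be run to produce the final ElementOutput tensor.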
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to perform final reduction
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineStridedFinal {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ComputeFragment = Array<ElementCompute, VectorLength>;
using SourceFragment = AlignedArray<ElementSource, VectorLength>;
using OutputFragment = AlignedArray<ElementOutput, VectorLength>;
/// Shared memory
struct SharedStorage { };
/// Parameters structure
using Params = TensorReductionAffineStridedParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank - 1> & coord,
int64_t &dst_offset,
uint64_t linear_idx) const {
// Decompose linear index
coord = CoordinateDecomposition<kReducedRank - 1>(linear_idx, params.divmod);
// Compute tensor offset
dst_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank - 1; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices
CUTLASS_DEVICE
ComputeFragment reduce_indices_(
Params const ¶ms,
char *src_byte_ptr) {
ReductionOp reduction_op(params.reduction_op);
// Accumulated output
ComputeFragment identity_frag;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(identity_frag.size()); ++i) {
identity_frag[i] = params.reduction_identity;
}
ComputeFragment accumulator = identity_frag;
ComputeFragment workspace_fragments[kBatchSize];
// Partially unrolled loop
for (int idx = 0; idx < params.workspace_count; idx += kBatchSize) {
// Issue a batch of loads
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (idx + b < params.workspace_count) {
workspace_fragments[b] =
*reinterpret_cast<ComputeFragment *>(src_byte_ptr);
}
else {
workspace_fragments[b] = identity_frag;
}
        src_byte_ptr += params.workspace_stride;
}
// Perform a reduction
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorLength; ++i) {
accumulator[i] = reduction_op(accumulator[i], workspace_fragments[b][i]);
}
}
}
return accumulator;
}
public:
//
// Methods
//
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength;
char * src_byte_ptr = reinterpret_cast<char *>(params.device_workspace + coord_c);
char * dst_byte_ptr = reinterpret_cast<char *>(params.destination + coord_c);
// If the C index is out of bounds, exit
if (coord_c >= params.extent[kRank - 1]) {
return;
}
int64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y;
// Use modulo division to compute location
Coord<kReducedRank - 1> outer_coord;
int64_t dst_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
/// Complete the reduction
while (idx_linear < params.outer_count) {
int64_t src_byte_offset = idx_linear * params.workspace_outer_stride;
ComputeFragment result = reduce_indices_(
params,
src_byte_ptr + src_byte_offset);
// Convert to output type and store
NumericArrayConverter<ElementOutput, ElementCompute, VectorLength> convert_output;
auto cvt = convert_output(result);
*reinterpret_cast<OutputFragment *>(dst_byte_ptr + dst_byte_offset) =
reinterpret_cast<OutputFragment const &>(cvt);
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/reduction/kernel/tensor_reduce_affine_strided.h/0 | {
"file_path": "include/cutlass/reduction/kernel/tensor_reduce_affine_strided.h",
"repo_id": "include",
"token_count": 8525
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing how threads are mapped to a given tile.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
////////////////////////////////////////////////////////////////////////////////
/// Strip-mines a pitch-linear tile among a given number of threads, first along
/// the contiguous dimension then along the strided dimension.
///
/// The tile must be divisible by the thread count such that all threads may
/// execute the same number of iterations with the same delta to exhaustively
/// cover the tile.
///
/// This class satisfies the "RegularThreadMapping" concept.
///
/// This ThreadMap is used by SIMT kernels and operand E of the sparse tensor
/// kernels.
template <
typename Shape_,
int Threads,
int ElementsPerAccess = 1
>
struct PitchLinearStripminedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal implementation details
struct Detail {
static_assert(!(Shape::kContiguous % kElementsPerAccess), "");
/// Shape of the tile in units of vectors
using ShapeVec = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided
>;
static_assert((Threads < ShapeVec::kContiguous && !(ShapeVec::kContiguous % kThreads)) ||
(!(kThreads % ShapeVec::kContiguous)),
"Shape must be divisible by number of iterations of each thread.");
};
/// Number of iterations by each thread
using Iterations = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
1,
// Redo the comparison here to work around divide by zero compiler
            // error. The compiler evaluates both paths of platform::conditional.
(Threads >= Detail::ShapeVec::kContiguous
? (Detail::ShapeVec::kStrided + (kThreads / Detail::ShapeVec::kContiguous - 1)) /
(kThreads / Detail::ShapeVec::kContiguous)
: 0)>,
layout::PitchLinearShape<Detail::ShapeVec::kContiguous / kThreads,
Detail::ShapeVec::kStrided>>::type;
/// Interval between accesses along each dimension of the tensor's logical coordinate space
/// (in units of Elements)
using Delta = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
1,
kThreads / Detail::ShapeVec::kContiguous
>,
layout::PitchLinearShape<
kThreads * kElementsPerAccess,
1
>
>::type;
/// Shape of the tile in units of vectors
using StorageShape = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<Shape::kContiguous,
Iterations::kStrided*(kThreads / Detail::ShapeVec::kContiguous)>,
layout::PitchLinearShape<Shape::kContiguous, Shape::kStrided>>::type;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
/// (in units of Elements)
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
return TensorCoord(
(thread_id % Detail::ShapeVec::kContiguous) * kElementsPerAccess,
thread_id / Detail::ShapeVec::kContiguous);
}
};
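/// Example (illustrative sketch, not part of the original header):
///
///   using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
///       cutlass::layout::PitchLinearShape<64, 4>, 32, 4>;
///
/// yields ShapeVec = <16, 4>, Iterations = <1, 2>, Delta = <1, 2>, and
/// initial_offset(t) = ((t % 16) * 4, t / 16): 32 threads cover the 64x4 tile in
/// two strided steps of 4-element vectors.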
/// This ThreadMap is used by GEMV
template <
typename Shape,
int Threads,
int ElementsPerAccess = 1
>
struct PitchLinearTilePolicyStripminedThreadContiguous
{
static_assert((Shape::kContiguous % (Threads * ElementsPerAccess)) == 0,
"Contiguous shape must divide number of threads");
using TensorCoord = layout::PitchLinearCoord;
static int const kThreads = Threads;
static int const kElementsPerAccess = ElementsPerAccess;
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / (kThreads * kElementsPerAccess),
Shape::kStrided>;
using Delta = layout::PitchLinearShape<1, 1>;
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id)
{
return TensorCoord(thread_id * Iterations::kContiguous * kElementsPerAccess, 0);
}
};
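/// Example (illustrative, not part of the original header): with Shape = <256, 1>,
/// Threads = 32 and ElementsPerAccess = 4, Iterations = <2, 1> and
/// initial_offset(t) = (t * 8, 0), i.e. each thread owns 8 consecutive elements
/// loaded as two 4-element accesses.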
template <
typename Shape,
int Threads,
int ElementsPerAccess = 1
>
struct PitchLinearTilePolicyStripminedThreadStrided
{
static_assert((Shape::kStrided % Threads == 0),
"Strided shape must divide number of threads");
using TensorCoord = layout::PitchLinearCoord;
static int const kThreads = Threads;
static int const kElementsPerAccess = ElementsPerAccess;
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided / kThreads>;
using Delta = layout::PitchLinearShape<1, 1>;
using ShapeVec = Shape;
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id)
{
return TensorCoord(0, thread_id * Iterations::kStrided);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Policy defining a warp-raked arrangement in which a shape is partitioned into contiguous
/// elements.
///
/// This ThreadMap is used by tensor core kernels.
template <
typename Shape_,
int Threads,
typename WarpThreadArrangement_,
int ElementsPerAccess = 1
>
struct PitchLinearWarpRakedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal details made public to facilitate introspection
struct Detail {
/// Fixed arrangement of threads within a warp (units of threads).
using WarpThreadArrangement = WarpThreadArrangement_;
/// Number of threads per warp
static int const kWarpSize = WarpThreadArrangement::kCount;
/// Number of participating warps
static int const kWarpCount = kThreads / kWarpSize;
static_assert(
!(Shape::kContiguous % kElementsPerAccess),
"Shape must be divisible by vector length.");
/// Compute the 'shape' of the overall tile in units of vectors
using ShapeInAccesses = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided
>;
static_assert(
!(ShapeInAccesses::kContiguous % WarpThreadArrangement::kContiguous),
"ShapeInAccesses must be divisible by WarpThreadArrangement.");
static_assert(
!(ShapeInAccesses::kStrided % WarpThreadArrangement::kStrided),
"ShapeInAccesses must be divisible by WarpThreadArrangement.");
// compute number of warp-level accesses total
using WarpAccessIterations = layout::PitchLinearShape<
ShapeInAccesses::kContiguous / WarpThreadArrangement::kContiguous,
ShapeInAccesses::kStrided / WarpThreadArrangement::kStrided
>;
// Divide it into the number of warps, first partitioning the strided dimension then the
// contiguous.
static int const kWarpsStrided =
(WarpAccessIterations::kStrided >= kWarpCount
? kWarpCount
: WarpAccessIterations::kStrided);
static int const kWarpsContiguous =
(kWarpCount > WarpAccessIterations::kStrided
? kWarpCount / kWarpsStrided
: 1);
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement = layout::PitchLinearShape<
kWarpsContiguous, kWarpsStrided
>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations = layout::PitchLinearShape<
Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous,
Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided
>;
static_assert(Iterations::kCount,
"Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta = layout::PitchLinearShape<
Detail::WarpThreadArrangement::kContiguous * kElementsPerAccess,
Detail::WarpThreadArrangement::kStrided
>;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided * Iterations::kStrided
};
// This is the offset of a specific warp (in units of vectors)
layout::PitchLinearCoord warp_offset{
(warp_id % Detail::kWarpsContiguous),
(warp_id / Detail::kWarpsContiguous)
};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous
};
// This is the offset of a thread within a threadblock tile (units of vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()
};
return thread_offset_in_threadblock_tile_base;
}
};
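/// Example (illustrative, not part of the original header): with Shape = <64, 32>,
/// Threads = 128, WarpThreadArrangement = <8, 4> and ElementsPerAccess = 8,
/// ShapeInAccesses = <8, 32>, WarpAccessIterations = <1, 8>, WarpArrangement = <1, 4>,
/// Iterations = <1, 2> and Delta = <64, 4>: each of the four warps rakes a
/// contiguous band of eight strided rows, stepping four rows between its two accesses.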
////////////////////////////////////////////////////////////////////////////////
/// Policy defining a warp-raked arrangement in which a shape is partitioned into contiguous
/// elements. Warps are arranged based on a stride.
///
/// This ThreadMap is used by tensor core kernels for NCxHWx layout.
template <
typename Shape_,
int Threads,
typename WarpThreadArrangement_,
int ElementsPerAccess = 1
>
struct PitchLinearStridedWarpRakedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
using WarpThreadArrangement = WarpThreadArrangement_;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Base ThreadMap
using BaseThreadMap = PitchLinearWarpRakedThreadMap<
Shape,
kThreads,
WarpThreadArrangement,
kElementsPerAccess
>;
/// Shape of access by each thread
using ThreadAccessShape = typename BaseThreadMap::ThreadAccessShape;
struct Detail {
using WarpThreadArrangement = WarpThreadArrangement_;
using WarpAccessIterations = typename BaseThreadMap::Detail::WarpAccessIterations;
static int const kWarpSize = BaseThreadMap::Detail::kWarpSize;
static int const kWarpCount = BaseThreadMap::Detail::kWarpCount;
using ShapeInAccesses = typename BaseThreadMap::Detail::ShapeInAccesses;
    // Divide it into the number of warps, first partitioning the contiguous dimension then the
    // strided dimension.
static int const kWarpsContiguous =
(WarpAccessIterations::kContiguous >= kWarpCount
? kWarpCount
: WarpAccessIterations::kContiguous);
static int const kWarpsStrided =
(kWarpCount > WarpAccessIterations::kContiguous
? kWarpCount / kWarpsContiguous
: 1);
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement = layout::PitchLinearShape<
kWarpsContiguous, kWarpsStrided
>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations = layout::PitchLinearShape<
Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous,
Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided
>;
static_assert(Iterations::kCount,
"Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta = typename BaseThreadMap::Delta;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided * Iterations::kStrided
};
// This is the offset of a specific warp (in units of vectors)
layout::PitchLinearCoord warp_offset{
(warp_id % Detail::kWarpsContiguous),
(warp_id / Detail::kWarpsContiguous)
};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous
};
// This is the offset of a thread within a threadblock tile (units of vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()
};
return thread_offset_in_threadblock_tile_base;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Transposes an existing ThreadMap. For example, an interleaved layout is
/// congruous in global memory but crosswise in shared memory, so the
/// coordinates must be transposed between the two.
template <typename ThreadMap_, typename WarpThreadArrangement_>
struct TransposePitchLinearThreadMap {
/// Underlying ThreadMap
using ThreadMap = ThreadMap_;
/// Tensor coordinate
using TensorCoord = typename ThreadMap::TensorCoord;
/// Tile shape
using Shape = typename ThreadMap::Shape;
/// Number of threads total
static int const kThreads = ThreadMap::kThreads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal details made public to facilitate introspection
struct Detail {
/// Fixed arrangement of threads within a warp (units of threads).
using WarpThreadArrangement = WarpThreadArrangement_;
/// Number of threads per warp
static int const kWarpSize = WarpThreadArrangement::kCount;
/// Number of participating warps
static int const kWarpCount = kThreads / kWarpSize;
static_assert(!(Shape::kContiguous % kElementsPerAccess),
"Shape must be divisible by vector length.");
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement =
layout::PitchLinearShape<ThreadMap::Detail::kWarpsStrided,
ThreadMap::Detail::kWarpsContiguous>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations =
layout::PitchLinearShape<ThreadMap::Iterations::kStrided,
ThreadMap::Iterations::kContiguous>;
static_assert(Iterations::kContiguous == 1,
"Contiguous iteration has to be one to reuse the same shared store function with those that don't need transpose");
static_assert(Iterations::kCount, "Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta =
layout::PitchLinearShape<Detail::WarpThreadArrangement::kContiguous *
kElementsPerAccess,
Detail::WarpThreadArrangement::kStrided>;
/// Maps thread ID to a coordinate offset within the tensor's logical
/// coordinate space Note this is slightly different from the one of
/// PitchLinearWarpRakedThreadMap.
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access
// (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided * Iterations::kStrided};
// This is the offset of a specific warp (in units of vectors)
// Note the order of / and %. Also the 2nd operand is kStrided.
layout::PitchLinearCoord warp_offset{
(warp_id / Detail::WarpArrangement::kStrided),
(warp_id % Detail::WarpArrangement::kStrided)};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous};
// This is the offset of a thread within a threadblock tile (units of
// vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of
// elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()};
return thread_offset_in_threadblock_tile_base;
}
};
template <typename ThreadMap_>
struct TransposePitchLinearThreadMapSimt {
/// Underlying ThreadMap
using ThreadMap = ThreadMap_;
/// Tensor coordinate
using TensorCoord = typename ThreadMap::TensorCoord;
/// Tile shape
using Shape = typename ThreadMap::Shape;
/// Number of threads total
static int const kThreads = ThreadMap::kThreads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static_assert(kElementsPerAccess == 1 , "Simt transpose requires elements per access to be 1");
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations =
layout::PitchLinearShape<ThreadMap::Iterations::kStrided,
ThreadMap::Iterations::kContiguous>;
static_assert(Iterations::kCount, "Number of iterations must be non-zero");
static_assert(Iterations::kStrided == 1,
"Strided iteration has to be one to reuse the same shared store function with those that don't need transpose");
/// Shape of access by each thread
using ThreadAccessShape = typename ThreadMap::ThreadAccessShape;
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta =
layout::PitchLinearShape<ThreadMap::Delta::kStrided,
ThreadMap::Delta::kContiguous>;
/// Maps thread ID to a coordinate offset within the tensor's logical
/// coordinate space Note this is slightly different from the one of
/// PitchLinearWarpRakedThreadMap.
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
TensorCoord coord = ThreadMap::initial_offset(thread_id);
return TensorCoord(
coord.strided(),
coord.contiguous()
);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Policy defining a warp-striped arrangement. This partitions a tile into vectorized memory
/// accesses performed by each warp then distributes warps across them. Warps are striped in the
/// strided dimension and raked across the contiguous dimension.
template <
typename Shape_, /// Overall shape to partition in units of elements
  int Threads, /// Number of participating threads
typename WarpThreadArrangement_, /// Describes the shape of one memory access per warp
int ElementsPerAccess = 1 /// Number of elements accessed by each thread per memory operation (i.e. vector size)
>
struct PitchLinearWarpStripedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal details made public to facilitate introspection
struct Detail {
/// Fixed arrangement of threads within a warp (units of threads).
using WarpThreadArrangement = WarpThreadArrangement_;
/// Number of threads per warp
static int const kWarpSize = WarpThreadArrangement::kCount;
/// Number of participating warps
static int const kWarpCount = kThreads / kWarpSize;
static_assert(
!(Shape::kContiguous % kElementsPerAccess),
"Shape must be divisible by vector length.");
/// Compute the 'shape' of the overall tile in units of vectors
using ShapeInAccesses = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided
>;
// compute number of warp-level accesses total
using WarpAccessIterations = layout::PitchLinearShape<
ShapeInAccesses::kContiguous / WarpThreadArrangement::kContiguous,
ShapeInAccesses::kStrided / WarpThreadArrangement::kStrided
>;
// Divide it into the number of warps, first partitioning the strided dimension then the
// contiguous.
static int const kWarpsStrided =
(WarpAccessIterations::kStrided >= kWarpCount
? kWarpCount : (kWarpCount / WarpAccessIterations::kStrided));
static int const kWarpsContiguous =
(kWarpCount > WarpAccessIterations::kStrided ?
WarpAccessIterations::kContiguous / kWarpsStrided : 1);
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement = layout::PitchLinearShape<
kWarpsContiguous, kWarpsStrided
>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations = layout::PitchLinearShape<
Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous,
Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided
>;
static_assert(Iterations::kCount,
"Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta = layout::PitchLinearShape<
Detail::WarpThreadArrangement::kContiguous * kElementsPerAccess,
Detail::WarpThreadArrangement::kStrided * Detail::WarpArrangement::kStrided
>;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided
};
// This is the offset of a specific warp (in units of vectors)
layout::PitchLinearCoord warp_offset{
(warp_id % Detail::kWarpsContiguous),
(warp_id / Detail::kWarpsContiguous)
};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous
};
// This is the offset of a thread within a threadblock tile (units of vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()
};
return thread_offset_in_threadblock_tile_base;
}
};
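/// Example (illustrative, not part of the original header): for the same parameters
/// as the warp-raked example above (Shape = <64, 32>, Threads = 128,
/// WarpThreadArrangement = <8, 4>, ElementsPerAccess = 8), Iterations = <1, 2> but
/// Delta = <64, 16>: the four warps sit in adjacent 4-row bands and each warp strides
/// 16 rows to its next access rather than owning one contiguous band.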
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Strip-mines a pitch-linear tile among a given number of threads, first along the contiguous
/// dimension then along the strided dimension, while each thread accesses a 2D thread-tile.
///
/// The tile must be divisible by the thread count such that all threads may execute the same
/// number of iterations with the same delta to exhaustively cover the tile.
///
/// This class satisfies the "RegularThreadMapping" concept.
template <
typename Shape_,
int Threads,
typename ThreadTileShape
>
struct PitchLinear2DThreadTileStripminedThreadMap;
template <
typename Shape_,
int Threads
>
struct PitchLinear2DThreadTileStripminedThreadMap <Shape_, Threads, cutlass::layout::PitchLinearShape<4, 4>>{
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Access Shape of each thread
using ThreadAccessShape = cutlass::layout::PitchLinearShape<4, 4>;
//using ThreadAccessShape = ThreadTileShape;
/// Number of threads total
static int const kThreads = Threads;
/// Extract length of each access from Layout
static int const kElementsPerAccess = ThreadAccessShape::kContiguous;
  static_assert(!(kElementsPerAccess % 4), "kElementsPerAccess needs to be a multiple of 4 (32 bits)");
/// Internal implementation details
struct Detail {
    static_assert(!(ThreadAccessShape::kContiguous % 4), "ThreadAccessShape::kContiguous needs to be a multiple of 4");
static_assert(!(Shape::kContiguous % ThreadAccessShape::kContiguous), "");
static_assert(!((Shape::kContiguous * Shape::kStrided) % (kThreads * ThreadAccessShape::kCount)),
"Shape must be divisible thread count * accesses per thread.");
/// Shape of the tile in units of vectors
using ShapeVec = layout::PitchLinearShape<
Shape::kContiguous / ThreadAccessShape::kContiguous,
Shape::kStrided / ThreadAccessShape::kStrided
>;
static_assert(
(Threads < ShapeVec::kContiguous && !(ShapeVec::kContiguous % kThreads)) ||
(!(kThreads % ShapeVec::kContiguous) && !(ShapeVec::kStrided % (kThreads / ShapeVec::kContiguous))),
"Shape must be divisible by number of iterations of each thread."
);
};
/// Number of iterations by each thread
using Iterations = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
1,
// Redo the comparison here to work around divide by zero compiler
            // error. The compiler evaluates both paths of platform::conditional.
(Threads >= Detail::ShapeVec::kContiguous
? Detail::ShapeVec::kStrided /
(kThreads / Detail::ShapeVec::kContiguous)
: 0)>,
layout::PitchLinearShape<Detail::ShapeVec::kContiguous / kThreads,
Detail::ShapeVec::kStrided>>::type;
/// Interval between accesses along each dimension of the tensor's logical coordinate space
/// (in units of Elements)
using Delta = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
Shape::kContiguous,
kThreads * ThreadAccessShape::kStrided / Detail::ShapeVec::kContiguous
>,
layout::PitchLinearShape<
kThreads * ThreadAccessShape::kContiguous,
1
>
>::type;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
/// (in units of Elements)
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
return TensorCoord(
(thread_id % Detail::ShapeVec::kContiguous) * ThreadAccessShape::kContiguous,
(thread_id / Detail::ShapeVec::kContiguous) * ThreadAccessShape::kStrided);
}
};
/// Thread mapping that presents a 2D thread-tiled mapping as a transposed PitchLinear2DThreadTile mapping
template <typename ThreadMap_>
struct TransposePitchLinearThreadMap2DThreadTile {
/// Underlying ThreadMap
using ThreadMap = ThreadMap_;
/// Tensor coordinate
using TensorCoord = typename ThreadMap::TensorCoord;
/// Tile shape
using Shape = typename ThreadMap::Shape;
/// Number of threads total
static int const kThreads = ThreadMap::kThreads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
  static_assert(kElementsPerAccess > 1, "2D thread-tile transpose requires elements per access to be greater than 1");
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations =
layout::PitchLinearShape<ThreadMap::Iterations::kStrided,
ThreadMap::Iterations::kContiguous>;
static_assert(Iterations::kCount, "Number of iterations must be non-zero");
/// Shape of access by each thread
using ThreadAccessShape = typename ThreadMap::ThreadAccessShape;
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta =
layout::PitchLinearShape<ThreadMap::Delta::kStrided,
ThreadMap::Delta::kContiguous>;
/// Maps thread ID to a coordinate offset within the tensor's logical
/// coordinate space Note this is slightly different from the one of
/// PitchLinearWarpRakedThreadMap.
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
TensorCoord coord = ThreadMap::initial_offset(thread_id);
return TensorCoord(
coord.strided(),
coord.contiguous()
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/pitch_linear_thread_map.h/0 | {
"file_path": "include/cutlass/transform/pitch_linear_thread_map.h",
"repo_id": "include",
"token_count": 10979
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates for computing the addresses at which small scale and bias
    vectors are stored in shared memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// RegularScaleBiasVectorAccessIterator
///
template <typename Shape, typename Element, typename Layout>
class RegularScaleBiasVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_>
class RegularScaleBiasVectorAccessIterator<Shape_, Element_, layout::PitchLinear> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
/// Element type per access
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kThreads = Shape::kContiguous / kElementsPerAccess;
using AccessType = Array<Element, kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator(
TensorRef scale_bias_ref, ///< Pointer to the start of the scale and bias
///< vector
int thread_id ///< ID of each participating thread
)
: byte_offset_(0) {
// Per-thread offset in logical coordinates of tensor
int thread_offset = thread_id * kElementsPerAccess;
// initialize pointer
pointer_ =
reinterpret_cast<AccessType *>(scale_bias_ref.data() + thread_offset);
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
char *access_byte_ptr =
reinterpret_cast<char *>(pointer_);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator &operator++() { return *this; }
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator operator++(int) {
RegularScaleBiasVectorAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset in the unit of tile.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
    // Multiply by 2 because the scale and bias vectors belonging to the same stage
    // are stored next to each other.
add_pointer_offset(coord.contiguous() * Shape::kContiguous * 2);
}
};
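/// Example (illustrative, not part of the original header): with Element = half_t
/// and Shape::kContiguous = 64, kElementsPerAccess = 8 and kThreads = 8, so eight
/// threads each fetch one 128-bit vector; add_tile_offset({1, 0}) advances the
/// pointer by 2 * 64 elements because the scale and bias vectors of a stage are adjacent.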
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_>
class RegularScaleBiasVectorAccessIterator<
Shape_, Element_,
layout::RowMajor> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
/// Underlying iterator type
using UnderlyingIterator = RegularScaleBiasVectorAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator(
TensorRef scale_bias_ref, ///< Pointer to the start of the scale and bias
///< vector
int thread_id ///< ID of each participating thread
)
: iterator_({scale_bias_ref.data(), scale_bias_ref.stride()}, thread_id) {
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator operator++(int) {
RegularScaleBiasVectorAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h/0 | {
"file_path": "include/cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h",
"repo_id": "include",
"token_count": 2467
} | 33 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Utilities for initializing workspaces
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include "cuda.h"
#include "cuda_runtime.h"
#include "cutlass/trace.h"
#endif
#include "cutlass.h"
#include "cutlass/cuda_host_adapter.hpp"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
static constexpr int MinWorkspaceAlignment = 16;
#if !defined(__CUDACC_RTC__)
static Status
zero_workspace(void* workspace, size_t workspace_size, cudaStream_t stream = nullptr) {
if (workspace_size > 0) {
if (workspace == nullptr) {
CUTLASS_TRACE_HOST(" error: device workspace must not be null");
return Status::kErrorWorkspaceNull;
}
CUTLASS_TRACE_HOST(" clearing workspace");
cudaError_t result = cudaMemsetAsync(workspace, 0, workspace_size, stream);
if (cudaSuccess != result) {
result = cudaGetLastError(); // to clear the error bit
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
#endif
#if !defined(__CUDACC_RTC__)
template <typename T>
Status
fill_workspace(void* workspace, T fill_value, size_t fill_count, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) {
static_assert(sizeof(T) == 4 || sizeof(T) == 2 || sizeof(T) == 1, "Unsupported fill type");
if (fill_count > 0) {
if (workspace == nullptr) {
CUTLASS_TRACE_HOST(" error: device workspace must not be null");
return Status::kErrorWorkspaceNull;
}
CUTLASS_TRACE_HOST(" filling workspace");
CUdeviceptr d_workspace = reinterpret_cast<CUdeviceptr>(workspace);
#if defined(CUTLASS_ENABLE_CUDA_HOST_ADAPTER) && CUTLASS_ENABLE_CUDA_HOST_ADAPTER
//
// Use the cuda host adapter
//
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
Status status = Status::kErrorInternal;
status = cuda_adapter->memsetDevice(workspace, fill_value, fill_count, stream);
if (status!=Status::kSuccess) {
return Status::kErrorInternal;
}
}
else {
return Status::kErrorInternal;
}
#else
CUresult result = CUDA_SUCCESS;
if (sizeof(T) == 4) {
result = cuMemsetD32Async(d_workspace, reinterpret_cast<uint32_t&>(fill_value), fill_count, stream);
}
else if (sizeof(T) == 2) {
result = cuMemsetD16Async(d_workspace, reinterpret_cast<uint16_t&>(fill_value), fill_count, stream);
}
else if (sizeof(T) == 1) {
result = cuMemsetD8Async(d_workspace, reinterpret_cast<uint8_t&>(fill_value), fill_count, stream);
}
if (CUDA_SUCCESS != result) {
      const char* error_string = nullptr;
      (void) cuGetErrorString(result, &error_string);
      if (error_string != nullptr) {
        CUTLASS_TRACE_HOST("  cuMemsetD" << sizeof(T) * 8 << "Async() returned error " << error_string);
}
else {
CUTLASS_TRACE_HOST(" cuMemsetD" << sizeof(T) * 8 << "Async() returned unrecognized error");
}
return Status::kErrorInternal;
}
#endif
}
return Status::kSuccess;
}
#endif
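/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Example usage (illustrative sketch only; `device_workspace`, `element_count`,
// `workspace_bytes`, and `stream` are hypothetical caller-provided values):
//
//   float fill_value = 1.0f;
//   cutlass::Status status =
//       cutlass::fill_workspace(device_workspace, fill_value, element_count, stream);
//   if (status != cutlass::Status::kSuccess) {
//     // handle error
//   }
//
//   // zero_workspace() clears `workspace_bytes` bytes via cudaMemsetAsync:
//   status = cutlass::zero_workspace(device_workspace, workspace_bytes, stream);
//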
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/workspace.h/0 | {
"file_path": "include/cutlass/workspace.h",
"repo_id": "include",
"token_count": 1721
} | 34 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Epilogue visitor interface for compiling and running visitor-based epilogues.
"""
import ctypes
from cuda import cuda
from cutlass_library import DataType
import numpy as np
from cutlass.backend.epilogue import EpilogueFunctorBase
import cutlass.backend.evt.backend
from cutlass.backend.frontend import TensorFrontend
from cutlass.utils.datatypes import is_numpy_tensor
from cutlass.backend.evt.passes.util import cc_map
class EpilogueFunctorVisitor(EpilogueFunctorBase):
"""
Apply an epilogue functor described by the epilogue EVT
:param cc: compute capability
    :param visitor: user-provided visitor frontend
"""
def __init__(self, cc: int, visitor, element_compute=DataType.f32) -> None:
# Type of Emitter based on CC
self.emit_cls = getattr(cutlass.backend.evt.backend, f"Sm{cc_map[cc]}Emitter")
# Visitor Types
self.visitor = visitor
self.graph = visitor.dag_ir
# Data types
self.element_epilogue = element_compute # element compute
self.element_output = self.graph.get_node_meta('D').underlying_impl.element
# Epilogue Thread Type
epilogue_thread_type = self.visitor.epilogue_thread_type
if cc == 90:
self.arg_c_type = self.visitor.arg_c_type
self.arg_d_type = self.visitor.arg_d_type
output_names = self.visitor.return_names
reduction_names = self.visitor.reduction_names
# Epilogue stages specialized for sm80 kernel
if cc == 80:
if hasattr(self.visitor, "epilogue_stages"):
self.epilogue_stages = self.visitor.epilogue_stages
assert self.epilogue_stages <= 2, "Only supports Stages <=2 in SM80 Epilogue"
# Epilogue Argument Type
class _Arguments(ctypes.Structure):
"""
Concepts:
class _EpilogueArguments(ctypes.Structure):
_fields_ = [
("epilogue", _Arguments), <- this class
("ptr_C", ctypes.c_void_p),
("stride_C", StrideBatched_),
("ptr_D", ctypes.c_void_p),
("stride_D", StrideBatched_)
]
"""
_fields_ = [
("output_op", epilogue_thread_type)
]
def __init__(self, kwargs: dict) -> None:
# The user-input kwargs is a dict of (name: tensors)
# We first convert all of them to device pointers
ptr_kwargs = {}
for key in kwargs.keys():
is_output = key in output_names and key not in reduction_names
ptr_kwargs[key] = self.get_tensor_ptr(key, kwargs, is_output)
# Initialize the thread arguments
self.output_op = epilogue_thread_type(ptr_kwargs)
def get_tensor_ptr(self, tensor_name, kwargs, is_output=False):
"""
Helper function for extracting device pointer
"""
# Skip the special tensors
if cc == 90:
if tensor_name in ["C", "D"]:
return 0
if tensor_name not in kwargs.keys():
raise ValueError(f"Tensor {tensor_name} is not provided.")
tensor = kwargs[tensor_name]
# For float scalar constant, directly return the value
if isinstance(tensor, float):
return tensor
# The tensor frontend returns a device buffer for np.ndarray
# and device ptr for other frontends
buffer_or_ptr = TensorFrontend.argument(tensor, is_output)
if is_numpy_tensor(tensor):
# Remember the host tensor for later synchronization
setattr(self, f"{tensor_name}_buffer", buffer_or_ptr)
setattr(self, f"{tensor_name}_host", tensor)
return int(buffer_or_ptr.ptr)
else:
return int(buffer_or_ptr)
def sync(self):
"""
Synchronize the results from device to host
"""
for name in output_names:
if hasattr(self, f"{name}_host"):
host_tensor = getattr(self, f"{name}_host")
tensor_ptr = getattr(self, f"{name}_buffer").ptr
(err,) = cuda.cuMemcpyDtoH(
host_tensor,
tensor_ptr,
host_tensor.size * host_tensor.itemsize,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
self.epilogue_type = _Arguments
def emit(self, operation):
"""
Emit the C++ code
"""
emitter = self.emit_cls(operation, self.graph)
return emitter.emit()
def get_smem_size(self, tile_description):
"""
Get the shared memory size in bytes
"""
return self.visitor.get_smem_size(tile_description)
| python/cutlass/backend/evt/epilogue.py/0 | {
"file_path": "python/cutlass/backend/evt/epilogue.py",
"repo_id": "python",
"token_count": 3124
} | 35 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Merge non-tree sub-graphs of the DAG IR into a single DAG. The fused DAG will be implemented
by the topological visitor, while the rest of the graph will be implemented with the tree visitor.
"""
from copy import deepcopy
from cutlass.backend.evt.ir import DAGIR, TopoVisitorNode
from cutlass.backend.evt.passes.pass_get_impl import PassGetImpl
from cutlass.backend.evt.passes.pass_manager import EVTPassBase
from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation
class PassDAG2Tree(EVTPassBase):
"""
Convert the DAG IR to Tree by fusing subgraphs
"""
dependencies = [
PassShapeTypePropagation,
PassGetImpl
]
def call(self):
# Step 1: find the nodes that have multiple parents
multi_parent_nodes = []
for node in self.dag_ir.nodes_topological_order():
if self.dag_ir.out_degree(node) > 1:
multi_parent_nodes.append(node)
# Step 2: find the lowest common ancestor (LCA) of all its parents
for node in multi_parent_nodes:
            # A multi-parent node could already have been fused by a previous fusion
if not self.dag_ir.has_node(node):
continue
            # A node not covered by the previous fusions can still see its out-degree change
            # Case 1: it has <= 1 edge into the previously fused subgraph, so no degree change
            # Case 2: it has more than one edge into the previously fused subgraph, so its degree drops
if self.dag_ir.out_degree(node) <= 1:
continue
            # Otherwise, the node still has multiple users; find the lowest common ancestor (LCA) of its users and fuse the subgraph between them
reachable_nodes = []
# Complexity: O(Dout*N)
for parent in self.dag_ir.get_users(node):
reachable_nodes.append(set(self.dag_ir.all_reachable_nodes(parent)))
# get the common reachable objects
common_items = set.intersection(*reachable_nodes)
# If common ancestor exists, find the lowest one
if len(common_items) > 0:
topo_order = self.dag_ir.nodes_topological_order()
lca = None
topo_idx = -1
for item in common_items:
if lca is None:
lca = item
topo_idx = topo_order.index(item)
else:
if topo_idx > topo_order.index(item):
lca = item
topo_idx = topo_order.index(item)
# The lca is the output node of the DAG node
# Get the nodes to be fused
node_to_fuse = set.union(*reachable_nodes).difference(common_items)
node_to_fuse.add(lca)
# Get all the input nodes
all_input_nodes = []
all_output_nodes = []
for node in node_to_fuse:
all_input_nodes.append(set(self.dag_ir.get_all_inputs(node)))
all_output_nodes.append(set(self.dag_ir.get_users(node)))
all_input_nodes = set.union(*all_input_nodes)
all_output_nodes = set.union(*all_output_nodes)
new_subgraph_nodes = set.union(node_to_fuse, all_input_nodes, all_output_nodes)
# Create the subgraph
subgraph_ = self.dag_ir._graph.subgraph(new_subgraph_nodes)
subgraph = DAGIR()
for node in subgraph_.nodes:
meta = deepcopy(self.dag_ir.get_node_meta(node))
if node not in node_to_fuse:
meta.disabled = True
subgraph.add_node(meta)
for edge in subgraph_.edges:
subgraph.add_edge(edge[0], edge[1], self.dag_ir.get_edge_weight(edge[0], edge[1]))
# Create the fused node
dag_node = TopoVisitorNode(
name=f"dag_{lca}", subgraph=subgraph,
output_node=self.dag_ir.get_node_meta(lca))
self.dag_ir.add_node(dag_node)
# Add input edges
for idx, node in enumerate(all_input_nodes):
self.dag_ir.add_edge(node, dag_node.name, weight=idx)
# Replace all uses with DAG node (only 1 output node)
self.dag_ir.replace_all_uses_with(lca, dag_node.name)
# Remove all fused nodes
node_to_fuse.remove(lca)
for node in node_to_fuse:
self.dag_ir.remove_node(node)
else:
raise NotImplementedError("No LCA found. Consider SplitTreeVisitor.")
def ensures(self) -> None:
# Ensure that after the pass, the resulting DAG becomes a tree
for node in self.dag_ir.nodes:
out_degree = self.dag_ir.out_degree(node)
if out_degree > 1:
raise RuntimeError(f"PassDAG2Tree failed. Node {node} still have outdegree = {out_degree}")
| python/cutlass/backend/evt/passes/pass_dag_2_tree.py/0 | {
"file_path": "python/cutlass/backend/evt/passes/pass_dag_2_tree.py",
"repo_id": "python",
"token_count": 2991
} | 36 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for emitting Trmm kernels
"""
import enum
import functools
import operator
import os.path
import shutil
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
except ImportError:
from library import *
###################################################################################################
#
# Data structure modeling a TRMM operation
#
###################################################################################################
#
class TrmmOperation:
#
def __init__(self, trmm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8):
self.operation_kind = OperationKind.Trmm
self.arch = arch
self.tile_description = tile_description
self.trmm_kind = trmm_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
# return self.trmm_kind in (TrmmKind.PlanarComplex, TrmmKind.PlanarComplexArray)
return False
#
def is_mixed_input(self):
return self.A.element != self.B.element
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, TrmmKindNames[self.trmm_kind])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)]
)
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
#
def side_mode_name(self):
return "%s" % (ShortSideModeNames[self.A.side_mode])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.A.fill_mode])
#
def diag_type_name(self):
return "%s" % (ShortDiagTypeNames[self.A.diag_type])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${side_mode}_${fill_mode}_${diag_type}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'side_mode': self.side_mode_name(),
'fill_mode': self.fill_mode_name(),
'diag_type': self.diag_type_name(),
'alignment': "%d" % self.C.alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitTrmmUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.trmm_template = """
// Trmm operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Trmm<
${element_a}, ${layout_a},
${side_mode}, ${fill_mode}, ${diag_type},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation}
>;
"""
self.trmm_complex_template = """
// Trmm operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Trmm<
${element_a}, ${layout_a},
${side_mode}, ${fill_mode}, ${diag_type},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation},
${transform_a}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'side_mode' : SideModeTag[operation.A.side_mode],
'fill_mode': FillModeTag[operation.A.fill_mode],
'diag_type' : DiagTypeTag[operation.A.diag_type],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(1), # TRMM A's alignment is always 1 for no padding to work until we make zfill work with variable bytes
'align_b': str(operation.B.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform]
}
trmm_template = self.trmm_complex_template if operation.is_complex() else self.trmm_template
return SubstituteTemplate(trmm_template, values)
###################################################################################################
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitTrmmConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
TrmmKind.Universal: EmitTrmmUniversalInstance,
}
self.trmm_kind_wrappers = {
TrmmKind.Universal: 'TrmmOperation',
}
self.instance_template = {
TrmmKind.Universal: """
${compile_guard_start}
manifest.append(new ${trmm_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
    }
    # Guard used to wrap WMMA-specialized kernels; referenced by emit() below.
    self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)"
self.header_template = """
/*
Generated by trmm_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "trmm_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.trmm_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.trmm_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'trmm_kind': self.trmm_kind_wrappers[operation.trmm_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
| python/cutlass_library/trmm_operation.py/0 | {
"file_path": "python/cutlass_library/trmm_operation.py",
"repo_id": "python",
"token_count": 5627
} | 37 |
/*
This CSS file should be overridden by the theme authors. It's
meant for debugging and developing the skeleton that this theme provides.
*/
body {
font-family: -apple-system, "Segoe UI", Roboto, Helvetica, Arial, sans-serif,
"Apple Color Emoji", "Segoe UI Emoji";
background: lavender;
}
.sb-announcement {
background: rgb(131, 131, 131);
}
.sb-announcement__inner {
background: black;
color: white;
}
.sb-header {
background: lightskyblue;
}
.sb-header__inner {
background: royalblue;
color: white;
}
.sb-header-secondary {
background: lightcyan;
}
.sb-header-secondary__inner {
background: cornflowerblue;
color: white;
}
.sb-sidebar-primary {
background: lightgreen;
}
.sb-main {
background: blanchedalmond;
}
.sb-main__inner {
background: antiquewhite;
}
.sb-header-article {
background: lightsteelblue;
}
.sb-article-container {
background: snow;
}
.sb-article-main {
background: white;
}
.sb-footer-article {
background: lightpink;
}
.sb-sidebar-secondary {
background: lightgoldenrodyellow;
}
.sb-footer-content {
background: plum;
}
.sb-footer-content__inner {
background: palevioletred;
}
.sb-footer {
background: pink;
}
.sb-footer__inner {
background: salmon;
}
.sb-article {
background: white;
}
| python/docs/_static/debug.css/0 | {
"file_path": "python/docs/_static/debug.css",
"repo_id": "python",
"token_count": 454
} | 38 |
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.https://www.sphinx-doc.org/
exit /b 1
)
if "%1" == "" goto help
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd
| python/docs_src/make.bat/0 | {
"file_path": "python/docs_src/make.bat",
"repo_id": "python",
"token_count": 317
} | 39 |
# Installation
## Installing a stable release
Stable releases of the CUTLASS Python interface are available via the `nvidia-cutlass` PyPI package. Any other packages with the name `cutlass` are not affiliated with NVIDIA CUTLASS.
```bash
pip install nvidia-cutlass
```
## Installing from source
Installing from source requires a CUDA Toolkit that matches the major.minor version of the installed CUDA Python (`cuda-python`) package.
Prior to installing the CUTLASS Python interface, one may optionally set the following environment variables:
* `CUTLASS_PATH`: the path to the cloned CUTLASS repository
* `CUDA_INSTALL_PATH`: the path to the installation of CUDA
If these environment variables are not set, the installation process will infer them to be the following:
* `CUTLASS_PATH`: either one directory level above the current directory (i.e., `$(pwd)/..`) if installed locally or in the `source` directory of the location in which `cutlass_library` was installed
* `CUDA_INSTALL_PATH`: the directory holding `/bin/nvcc` for the first version of `nvcc` on `$PATH` (i.e., `which nvcc | awk -F'/bin/nvcc' '{print $1}'`)
**NOTE:** The version of `cuda-python` installed must match the CUDA version in `CUDA_INSTALL_PATH`.
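For example, one might export both variables explicitly before installing (the paths below are placeholders; substitute your own locations):
```bash
export CUTLASS_PATH=/path/to/cutlass
export CUDA_INSTALL_PATH=/usr/local/cuda
```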
### Installing a developer-mode package
The CUTLASS Python interface can currently be installed by navigating to the root of the CUTLASS directory and performing
```bash
pip install .
```
If you would like to make changes to the CUTLASS Python interface and have them reflected when using the interface, perform:
```bash
pip install -e .
```
## Docker
We recommend using the CUTLASS Python interface via an [NGC PyTorch Docker container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch):
```bash
docker run --gpus all -it --rm nvcr.io/nvidia/pytorch:23.08-py3
```
| python/docs_src/source/install.md/0 | {
"file_path": "python/docs_src/source/install.md",
"repo_id": "python",
"token_count": 528
} | 40 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Tests emitting a CUTLASS kernel to a PyTorch CUDA extension
"""
import random
import tempfile
import unittest
from cutlass_library import ConvMode
import cutlass
if cutlass.utils.datatypes.is_torch_available():
import torch
def _initialize(dtype, M: int, N: int, K: int):
"""
Utility function to initialize A, B, C, and D matrices corresponding to dimensions M, N, and K
:param dtype: data type of tensors
:param M: M dimension of GEMM problem
:type M: int
:param N: N dimension of GEMM problem
:type N: int
    :param K: K dimension of GEMM problem
:type K: int
:return: initialized tensors A, B, C, and D
:rtype: list
"""
sizes = [(M, K), (K, N), (M, N), (M, N)]
return [torch.randint(-3, 3, size, device='cuda').to(dtype) for size in sizes]
def _generate_problems(dtype, num):
"""
Utility function to generate `num` GEMMs of random sizes
:param dtype: data type of tensors
:param num: number of GEMMs to generate
:type num: int
:return: lists of A, B, C, and D tensors
:rtype: list
"""
valid_sizes = [128, 256, 512, 1024]
As, Bs, Cs, Ds = [], [], [], []
for _ in range(num):
M, N, K = [random.choice(valid_sizes) for _ in range(3)]
A, B, C, D = _initialize(dtype, M, N, K)
As.append(A)
Bs.append(B)
Cs.append(C)
Ds.append(D)
return As, Bs, Cs, Ds
def _generate_conv2d_problem(conv_kind, dtype, ps):
"""
Utility function to generate conv2d inputs
:param conv_kind: kind of convolution
:type conv_kind: str
:param dtype: data type of tensors
    :param ps: the conv2d problem size
    :type ps: cutlass.shape.Conv2DProblemSize
    :return: initialized tensors A, B, and C
:rtype: list
"""
if conv_kind == "fprop":
tensor_A_size = (ps.N, ps.C, ps.H, ps.W)
tensor_B_size = (ps.K, ps.C, ps.R, ps.S)
tensor_C_size = (ps.N, ps.K, ps.P, ps.Q)
elif conv_kind == "dgrad":
tensor_A_size = (ps.N, ps.K, ps.P, ps.Q)
tensor_B_size = (ps.K, ps.C, ps.R, ps.S)
tensor_C_size = (ps.N, ps.C, ps.H, ps.W)
else:
tensor_A_size = (ps.N, ps.K, ps.P, ps.Q)
tensor_B_size = (ps.N, ps.C, ps.H, ps.W)
tensor_C_size = (ps.K, ps.C, ps.R, ps.S)
sizes = [tensor_A_size, tensor_B_size, tensor_C_size]
return [torch.ceil(torch.empty(size, dtype=dtype, device='cuda').uniform_(-4.5, 3.5)).to(memory_format=torch.channels_last) for size in sizes]
@unittest.skipIf(not cutlass.utils.datatypes.is_torch_available(), 'PyTorch must be available to run PyTorch extension tests')
class PyTorchExtensionTest(unittest.TestCase):
def test_gemm(self):
random.seed(2023)
dtype = torch.float16
plan = cutlass.op.Gemm(element=dtype, layout=cutlass.LayoutType.RowMajor)
op = plan.construct()
with tempfile.TemporaryDirectory() as tmpdir:
mod = cutlass.emit.pytorch(op, name='gemm_mod', cc=plan.cc, sourcedir=tmpdir, jit=True)
A, B, C, _ = _initialize(dtype, 1024, 256, 512)
D_ref = A @ B
D = mod.run(A, B)
assert torch.allclose(D, D_ref)
D = mod.run(A, B, C)
assert torch.allclose(D, D_ref)
D = mod.run(A, B, C, 1.0)
assert torch.allclose(D, D_ref)
D = mod.run(A, B, C, 1.0, 0.0)
assert torch.allclose(D, D_ref)
alpha = 2.0
beta = -1.0
D_ref = (A @ B) * alpha + (beta * C)
D = mod.run(A, B, C, alpha, beta)
assert torch.allclose(D, D_ref)
def test_grouped_gemm(self):
random.seed(2023)
dtype = torch.float16
plan = cutlass.op.GroupedGemm(element=dtype, layout=cutlass.LayoutType.RowMajor)
op = plan.construct()
with tempfile.TemporaryDirectory() as tmpdir:
mod = cutlass.emit.pytorch(op, name='grouped_gemm_mod', cc=plan.cc, sourcedir=tmpdir, jit=True)
As, Bs, Cs, _ = _generate_problems(dtype, 50)
def check_all(X, Y):
for x, y in zip(X, Y):
assert torch.allclose(x, y)
Ds_ref = [a @ b for a, b in zip(As, Bs)]
Ds = mod.run(As, Bs)
check_all(Ds, Ds_ref)
Ds = mod.run(As, Bs, Cs)
check_all(Ds, Ds_ref)
Ds = mod.run(As, Bs, Cs, 1.0)
check_all(Ds, Ds_ref)
Ds = mod.run(As, Bs, Cs, 1.0, 0.0)
check_all(Ds, Ds_ref)
alpha = 2.0
beta = -1.0
Ds_ref = [(a @ b) * alpha + (beta * c) for a, b, c in zip(As, Bs, Cs)]
Ds = mod.run(As, Bs, Cs, alpha, beta)
check_all(Ds, Ds_ref)
def test_conv2d_fprop(self):
torch.manual_seed(2023)
dtype = torch.float16
plan = cutlass.op.Conv2d(kind="fprop", element=dtype, element_accumulator=torch.float32)
plan.activation = "relu"
op = plan.construct()
with tempfile.TemporaryDirectory() as tmpdir:
mod = cutlass.emit.pytorch(op, name="conv2d_mod", cc=plan.cc, sourcedir=tmpdir, jit=True)
problem_size = cutlass.shape.Conv2DProblemSize(
1, 4, 4, 16,
8, 3, 3, 16,
0, 0,
3, 3,
1, 1
)
A, B, C = _generate_conv2d_problem("fprop", dtype, problem_size)
stride = (problem_size.stride_h, problem_size.stride_w)
padding = (problem_size.pad_h, problem_size.pad_w)
alpha = 1.0
beta = 0.5
D_ref = alpha * torch.ops.aten.conv2d(
A, B, stride=stride, padding=padding
) + beta * C
D_ref = torch.nn.functional.relu(D_ref)
D = mod.run(A, B, C, stride, padding, alpha=alpha, beta=beta)
assert torch.allclose(D, D_ref)
# Test serial split-K
D_serial_split_k = mod.run(A, B, C, stride, padding, alpha=alpha, beta=beta, split_k_mode="serial", split_k_slices=3)
assert torch.allclose(D, D_serial_split_k)
# Test parallel split-K
D_parallel_split_k = mod.run(A, B, C, stride, padding, alpha=alpha, beta=beta, split_k_mode="parallel", split_k_slices=7)
assert torch.allclose(D, D_parallel_split_k)
def test_conv2d_dgrad(self):
torch.manual_seed(2023)
dtype = torch.float16
plan = cutlass.op.Conv2d(kind="dgrad", element=dtype, element_accumulator=torch.float32)
op = plan.construct()
with tempfile.TemporaryDirectory() as tmpdir:
mod = cutlass.emit.pytorch(op, name="conv2d_dgrad_mod", cc=plan.cc, sourcedir=tmpdir, jit=True)
problem_size = cutlass.shape.Conv2DProblemSize(
1, 4, 4, 16,
8, 3, 3, 16,
0, 0,
3, 3,
1, 1,
ConvMode.CrossCorrelation,
1, 1
)
A, B, C = _generate_conv2d_problem("dgrad", dtype, problem_size)
stride = (problem_size.stride_h, problem_size.stride_w)
padding = (problem_size.pad_h, problem_size.pad_w)
alpha = 1.0
beta = 0.5
input_size = (problem_size.N, problem_size.C, problem_size.H, problem_size.W)
D_ref = alpha * torch.nn.grad.conv2d_input(
input_size, B, A,
stride=stride, padding=padding
) + beta * C
D = mod.run(input_size, A, B, C, stride, padding, alpha=alpha, beta=beta, )
assert torch.allclose(D, D_ref)
def test_conv2d_wgrad(self):
torch.manual_seed(2023)
dtype = torch.float16
plan = cutlass.op.Conv2d(kind="wgrad", element=dtype, element_accumulator=torch.float32)
op = plan.construct()
with tempfile.TemporaryDirectory() as tmpdir:
mod = cutlass.emit.pytorch(op, name="conv2d_wgrad_mod", cc=plan.cc, sourcedir=tmpdir, jit=True)
problem_size = cutlass.shape.Conv2DProblemSize(
1, 4, 4, 16,
8, 3, 3, 16,
0, 0,
3, 3,
1, 1,
ConvMode.CrossCorrelation,
1, 1
)
A, B, C = _generate_conv2d_problem("wgrad", dtype, problem_size)
stride = (problem_size.stride_h, problem_size.stride_w)
padding = (problem_size.pad_h, problem_size.pad_w)
alpha = 1.0
beta = 0.5
weight_size = (problem_size.K, problem_size.C, problem_size.R, problem_size.S)
D_ref = alpha * torch.nn.grad.conv2d_weight(
B, weight_size, A,
stride=stride, padding=padding
) + beta * C
D = mod.run(weight_size, A, B, C, stride, padding, alpha=alpha, beta=beta)
assert torch.allclose(D, D_ref)
# Test serial split-K
D_serial_split_k = mod.run(weight_size, A, B, C, stride, padding, alpha=alpha, beta=beta, split_k_mode="serial", split_k_slices=3)
assert torch.allclose(D, D_serial_split_k)
# Test parallel split-K
D_parallel_split_k = mod.run(weight_size, A, B, C, stride, padding, alpha=alpha, beta=beta, split_k_mode="parallel", split_k_slices=7)
assert torch.allclose(D, D_parallel_split_k)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/emit/pytorch.py/0 | {
"file_path": "test/python/cutlass/emit/pytorch.py",
"repo_id": "test",
"token_count": 4953
} | 41 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for the CUTLASS Quaternion template class.
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/quaternion.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/constants.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
static float const half_pi = cutlass::constants::half_pi<float>();
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Quaternion, add_f32) {
cutlass::Quaternion<float> q0(1, 1, 1, 1);
cutlass::Quaternion<float> q1(0, 0, 0, 2);
cutlass::Quaternion<float> q2 = q0 + q1;
EXPECT_TRUE(
q2.x() == 1 &&
q2.y() == 1 &&
q2.z() == 1 &&
q2.w() == 3
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
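// The rotation tests below build a quaternion representing a +pi/2 rotation about the z-axis
// and verify that the unit x-axis vector (1, 0, 0) maps to approximately (0, 1, 0); the
// inverse variants expect approximately (0, -1, 0). All checks are within a small epsilon.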
TEST(Quaternion, rotation) {
cutlass::Matrix3x1<float> x(1.0f, 0.0f, 0.0f);
cutlass::Quaternion<float> q = cutlass::Quaternion<float>::rotation(0, 0, 1, half_pi) * 2.0f;
cutlass::Matrix3x1<float> v = q.rotate(x);
float epsilon = 0.001f;
EXPECT_TRUE(
std::abs(v.at(0)) < epsilon &&
std::abs(v.at(1)) > (1 - epsilon) &&
std::abs(v.at(2)) < epsilon
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Quaternion, rotation_inv) {
cutlass::Matrix3x1<float> x(1.0f, 0.0f, 0.0f);
cutlass::Quaternion<float> q = cutlass::Quaternion<float>::rotation(0, 0, 1, half_pi) * 2.0f;
cutlass::Matrix3x1<float> v = q.rotate(x);
float epsilon = 0.001f;
EXPECT_TRUE(
std::abs(v.at(0)) < epsilon &&
std::abs(-v.at(1)) > (1 - epsilon) &&
std::abs(v.at(2)) < epsilon
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Quaternion, spinor_rotation) {
cutlass::Matrix3x1<float> x(1.0f, 0.0f, 0.0f);
cutlass::Quaternion<float> q = cutlass::Quaternion<float>::rotation(0, 0, 1, half_pi);
cutlass::Matrix3x1<float> v = cutlass::spinor_rotation(q, x);
float epsilon = 0.001f;
EXPECT_TRUE(
std::abs(v.at(0)) < epsilon &&
std::abs(v.at(1)) > (1 - epsilon) &&
std::abs(v.at(2)) < epsilon
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Quaternion, spinor_rotation_inv) {
cutlass::Matrix3x1<float> x(1.0f, 0.0f, 0.0f);
cutlass::Quaternion<float> q = cutlass::Quaternion<float>::rotation(0, 0, 1, half_pi);
cutlass::Matrix3x1<float> v = cutlass::spinor_rotation_inv(q, x);
float epsilon = 0.001f;
EXPECT_TRUE(
std::abs(v.at(0)) < epsilon &&
std::abs(-v.at(1)) > (1 - epsilon) &&
std::abs(v.at(2)) < epsilon
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Quaternion, as_rotation_matrix3x3) {
cutlass::Matrix3x1<float> x(1.0f, 0.0f, 0.0f);
cutlass::Quaternion<float> q = cutlass::Quaternion<float>::rotation(0, 0, 1, half_pi);
cutlass::Matrix3x1<float> v = q.as_rotation_matrix_3x3().product(x);
float epsilon = 0.001f;
EXPECT_TRUE(
std::abs(v.at(0)) < epsilon &&
std::abs(v.at(1)) > (1 - epsilon) &&
std::abs(v.at(2)) < epsilon
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Quaternion, as_rotation_matrix4x4) {
cutlass::Matrix4x1<float> x(1.0f, 0.0f, 0.0f, 1.0f);
cutlass::Quaternion<float> q = cutlass::Quaternion<float>::rotation(0, 0, 1, half_pi);
cutlass::Matrix4x1<float> v = q.as_rotation_matrix_4x4().product(x);
float epsilon = 0.001f;
EXPECT_TRUE(
std::abs(v.at(0)) < epsilon &&
std::abs(v.at(1)) > (1 - epsilon) &&
std::abs(v.at(2)) < epsilon &&
std::abs(v.at(3)) > (1 - epsilon)
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/core/quaternion.cu/0 | {
"file_path": "test/unit/core/quaternion.cu",
"repo_id": "test",
"token_count": 1998
} | 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <numeric>
#include <cute/container/bit_field.hpp>
#include <cute/algorithm/tuple_algorithms.hpp>
using namespace cute;
TEST(CuTe_core, Bitfield)
{
for_each(make_int_range<1,65>{}, [&](auto NumBits) {
constexpr auto num_bits = cute::remove_cvref_t<decltype(NumBits)>::value;
for_each(make_int_range<0, 129>{}, [&](auto BitStart) {
constexpr auto bit_start = cute::remove_cvref_t<decltype(BitStart)>::value;
using BF = bit_field<bit_start, cute::remove_cvref_t<decltype(NumBits)>::value>;
#if 0
printf("bit_field<%d,%d>:\n", bit_start, num_bits);
printf(" value_type_bits : %d\n", BF::value_type_bits);
printf(" storage_type_bits: %d\n", BF::storage_type_bits);
printf(" N : %d\n", BF::N);
printf(" idx : %d\n", BF::idx);
printf(" bit_lo : %d\n", BF::bit_lo);
printf(" bit_hi : %d\n", BF::bit_hi);
printf(" mask : 0x%lx\n", uint64_t(BF::mask));
printf(" mask_lo : 0x%lx\n", uint64_t(BF::mask_lo));
printf(" mask_hi : 0x%lx\n", uint64_t(BF::mask_hi));
#endif
      // Round-trip test: store the largest value representable in num_bits bits and read it back
uint64_t v = num_bits == 64 ? uint64_t(-1) : ((uint64_t(1) << NumBits) - 1);
BF bf{};
bf = v;
EXPECT_EQ(v, uint64_t(bf));
});
});
for_each(make_int_range<0,129>{}, [&](auto BitStart) {
using BF = bit_field<cute::remove_cvref_t<decltype(BitStart)>::value, 32, float>;
BF bf{};
bf = 3.14f;
EXPECT_EQ(3.14f, float(bf));
});
}
| test/unit/cute/core/bitfield.cpp/0 | {
"file_path": "test/unit/cute/core/bitfield.cpp",
"repo_id": "test",
"token_count": 1290
} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cute/pointer.hpp>
TEST(CuTe_core, Pointer)
{
using namespace cute;
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("CuTe pointer wrappers");
CUTLASS_TRACE_HOST("-------------------------------");
// Test T* overloads (T can be nonconst or const)
{
using T = float;
using expected_type = cute::gmem_ptr<T*>;
T* p = nullptr;
// explicit template argument
auto gmem_p0 = cute::make_gmem_ptr<T>(p);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
// deduced template argument
auto gmem_p1 = cute::make_gmem_ptr(p);
static_assert(cute::is_same_v<decltype(gmem_p1), expected_type>);
}
{
using T = float const;
using expected_type = cute::gmem_ptr<T*>;
T* p = nullptr;
// explicit template argument
auto gmem_p0 = cute::make_gmem_ptr<T>(p);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
// deduced template argument
auto gmem_p1 = cute::make_gmem_ptr(p);
static_assert(cute::is_same_v<decltype(gmem_p1), expected_type>);
}
// Test void* and void const* overloads
// (these require an explicit template argument)
{
using T = float;
using expected_type = cute::gmem_ptr<T*>;
void* p = nullptr;
auto gmem_p0 = cute::make_gmem_ptr<T>(p);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
}
{
using T = float const;
using expected_type = cute::gmem_ptr<T*>;
void const* p = nullptr;
auto gmem_p0 = cute::make_gmem_ptr<T>(p);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
}
// Test nullptr_t overload.
{
using T = float;
using expected_type = cute::gmem_ptr<T*>;
auto gmem_p0 = cute::make_gmem_ptr<T>(nullptr);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
}
{
using T = float const;
using expected_type = cute::gmem_ptr<T*>;
auto gmem_p0 = cute::make_gmem_ptr<T>(nullptr);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
}
}
| test/unit/cute/core/pointer.cpp/0 | {
"file_path": "test/unit/cute/core/pointer.cpp",
"repo_id": "test",
"token_count": 1348
} | 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for epilogues
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/complex.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace kernel {
template <typename Epilogue>
__global__ void epilogue_with_reduction_threadblock(
typename Epilogue::ElementVector *ptr_Reduction,
typename Epilogue::OutputTileIterator::Params params_D,
typename Epilogue::OutputTileIterator::Element *ptr_D,
typename Epilogue::OutputTileIterator::Params params_C,
typename Epilogue::OutputTileIterator::Element *ptr_C,
typename Epilogue::TensorTileIterator::Params params_Tensor,
typename Epilogue::TensorTileIterator::Element *ptr_Tensor,
typename Epilogue::OutputOp::Params params_output_op,
cutlass::MatrixCoord problem_size,
cutlass::TensorRef<
typename Epilogue::WarpMmaOperator::ElementC,
typename Epilogue::WarpMmaOperator::LayoutC> accumulator_ref,
int epilogue_count = 1) {
__shared__ typename Epilogue::SharedStorage shared_storage;
int thread_idx = threadIdx.x;
int warp_idx = threadIdx.x / 32;
int lane_idx = threadIdx.x % 32;
//
// Construct the epilogue
//
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D(
params_D,
ptr_D,
problem_size,
thread_idx
);
// Tile iterator reading from the source tile
typename Epilogue::OutputTileIterator iterator_C(
params_C,
ptr_C,
problem_size,
thread_idx
);
// Tile iterator for the additional tensor operand
typename Epilogue::TensorTileIterator iterator_T(
params_Tensor,
ptr_Tensor,
problem_size,
thread_idx
);
// Epilogue operator
Epilogue epilogue(
shared_storage,
thread_idx,
warp_idx,
lane_idx);
//
// Initialize the accumulators
//
int warp_mn = warp_idx % (Epilogue::WarpCount::kM * Epilogue::WarpCount::kN);
int warp_m = warp_mn % Epilogue::WarpCount::kM;
int warp_n = warp_mn / Epilogue::WarpCount::kM;
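// Example: with WarpCount::kM = WarpCount::kN = 2, warp_idx 3 maps to (warp_m, warp_n) = (1, 1);
// warp_m varies fastest across the MxN warp grid.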
accumulator_ref.add_coord_offset({
warp_m * Epilogue::WarpMmaOperator::Shape::kM,
warp_n * Epilogue::WarpMmaOperator::Shape::kN});
typename Epilogue::WarpMmaOperator::IteratorC accumulator_iterator(accumulator_ref, lane_idx);
typename Epilogue::AccumulatorTile accumulators;
accumulators.clear();
accumulator_iterator.load(accumulators);
#if 0
// For debugging, enable this block of code to fill each accumulator element with its
// source thread ID.
CUTLASS_PRAGMA_UNROLL
for (size_t i = 0; i < accumulators.size(); ++i) {
typename Epilogue::WarpMmaOperator::ElementC x(threadIdx.x);
accumulators[i] = x;
}
__syncthreads();
#endif
//
// Perform the epilogue operation
//
typename Epilogue::OutputOp output_op(params_output_op);
// Place the epilogue in a loop
for (int iter = 0; iter < epilogue_count; ++iter) {
epilogue(output_op, ptr_Reduction, iterator_D, accumulators, iterator_C, iterator_T);
}
}
} // namespace kernel
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Epilogue_
>
class EpilogueWithReductionTestbed {
public:
using Epilogue = Epilogue_;
using ElementAccumulator = typename Epilogue::ElementAccumulator;
using ElementCompute = typename Epilogue::OutputOp::ElementCompute;
using ElementTensor = typename Epilogue::TensorTileIterator::Element;
using ElementOutput = typename Epilogue::ElementOutput;
using OutputOpParams = typename Epilogue::OutputOp::Params;
public:
//
// Data members
//
cutlass::MatrixCoord quantized_size;
cutlass::HostTensor<ElementAccumulator, cutlass::layout::RowMajor> accumulator_tensor;
cutlass::HostTensor<ElementOutput, cutlass::layout::RowMajor> source_tensor;
cutlass::HostTensor<ElementOutput, cutlass::layout::RowMajor> output_tensor;
cutlass::HostTensor<ElementTensor, cutlass::layout::RowMajor> additional_tensor;
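// Partial reduction output: a single row holding one per-column sum over the threadblock tile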
cutlass::HostTensor<ElementAccumulator, cutlass::layout::RowMajor> reduction_tensor;
public:
//
// Methods
//
EpilogueWithReductionTestbed():
quantized_size(Epilogue::Shape::kM, Epilogue::Shape::kN),
accumulator_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
source_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
output_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
additional_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
reduction_tensor({1, Epilogue::Shape::kN}) {
//
// Initialize problem space
//
uint64_t seed = 2019;
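// Operands are filled with small values in [-20, 20] and the additional tensor with ones,
// so the verification below can compare results for exact equality.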
cutlass::reference::host::TensorFillRandomUniform(
accumulator_tensor.host_view(),
seed,
20,
-20,
0);
cutlass::reference::host::TensorFillRandomUniform(
source_tensor.host_view(),
seed + 2018,
20,
-20,
0);
cutlass::reference::host::TensorFill(additional_tensor.host_view(), ElementTensor(1));
}
bool run_all() {
/*
double alpha_values[] = {1, 0, 2.25};
double beta_values[] = {0, 1, -1.25};
// Test runtime explodes if we tried to test every case exhaustively. This tests the full
// output tile and several smaller sizes to stress predication.
for (int m_idx = 0; m_idx < 3; ++m_idx) {
for (int n_idx = 0; n_idx < 3; ++n_idx) {
int m = quantized_size.row() - m_idx * 3;
int n = quantized_size.column() - n_idx * Epilogue::kElementsPerAccess;
for (double const &alpha : alpha_values) {
for (double const &beta : beta_values) {
bool passed = run({m, n}, {cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta)});
if (!passed) {
return false;
}
}
}
}
}
return true;
*/
double alpha = 1;
double beta = 0;
return run(
{quantized_size.row(), quantized_size.column()},
{cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta)});
}
/// Runs the test
bool run(
cutlass::MatrixCoord problem_size,
OutputOpParams output_params) {
//
// Initialize problem space
//
ElementOutput default_output = ElementOutput(-127);
ElementAccumulator default_reduction = ElementAccumulator();
cutlass::reference::host::TensorFill(output_tensor.host_view(), default_output);
cutlass::reference::host::TensorFill(reduction_tensor.host_view(), default_reduction);
accumulator_tensor.sync_device();
output_tensor.sync_device();
source_tensor.sync_device();
additional_tensor.sync_device();
reduction_tensor.sync_device();
//
// Initialize epilogue parameters
//
typename Epilogue::OutputTileIterator::Params params_D(output_tensor.device_ref().layout());
typename Epilogue::OutputTileIterator::Params params_C(source_tensor.device_ref().layout());
typename Epilogue::TensorTileIterator::Params params_T(additional_tensor.device_ref().layout());
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(Epilogue::WarpCount::kCount * 32, 1);
test::kernel::epilogue_with_reduction_threadblock<Epilogue><<< grid, block >>>(
reduction_tensor.device_data(),
params_D,
output_tensor.device_data(),
params_C,
source_tensor.device_data(),
params_T,
additional_tensor.device_data(),
output_params,
problem_size,
accumulator_tensor.device_view());
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Kernel error: " << cudaGetErrorString(result) << std::endl;
return false;
}
//
// Verify results
//
output_tensor.sync_host();
reduction_tensor.sync_host();
int errors = 0;
int const kMaxErrors = 5;
//
// The output has two parts:
// - GEMM tensor epilogue in canonical layout
// - partial reduction in canonical row-major layout
//
// Verify the GEMM tensor output
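// Within the problem bounds the reference is alpha * accumulator + beta * source;
// outside the bounds the output must keep its fill value (default_output).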
for (int r = 0; errors < kMaxErrors && r < quantized_size.row(); ++r) {
for (int c = 0; errors < kMaxErrors && c < quantized_size.column(); ++c) {
cutlass::MatrixCoord coord{r, c};
ElementOutput got = output_tensor.at(coord);
ElementOutput expected;
if (coord.row() < problem_size.row() && coord.column() < problem_size.column()) {
expected = ElementOutput(output_params.alpha * ElementCompute(accumulator_tensor.at(coord)) +
output_params.beta * ElementCompute(source_tensor.at(coord)));
}
else {
expected = default_output;
}
if (expected != got) {
using OutputIO = cutlass::ScalarIO<ElementOutput>;
EXPECT_TRUE(false)
<< "-------\n"
<< "Error - output element (" << coord << ") - expected: "
<< OutputIO(expected)
<< ", got: " << OutputIO(got) << std::endl;
++errors;
}
}
}
// Verify the partial reduction
for (int c = 0; c < quantized_size.column(); ++c) {
ElementAccumulator reduction_acc = ElementAccumulator();
for (int r = 0; r < quantized_size.row(); ++r) {
reduction_acc += accumulator_tensor.at({r, c});
}
ElementAccumulator expected = default_reduction;
ElementAccumulator got = reduction_tensor.at({0, c});
if (c < problem_size.column()) {
expected = reduction_acc;
}
else {
expected = default_reduction;
}
if (expected != got) {
using OutputIO = cutlass::ScalarIO<ElementAccumulator>;
EXPECT_TRUE(false)
<< "-------\n"
<< "Error - reduction element (" << c << ") - expected: "
<< OutputIO(expected)
<< ", got: " << OutputIO(got) << std::endl;
}
}
//
// Report results on error
//
if (errors) {
std::stringstream ss;
ss
<< "output_tensor_op_" << Epilogue::Shape::kM << "x" << Epilogue::Shape::kN << "_"
<< Epilogue::WarpTileIterator::WarpShape::kM << "x"
<< Epilogue::WarpTileIterator::WarpShape::kN
<< "_slice_" << Epilogue::WarpCount::kK << ".csv";
std::ofstream output_file(ss.str());
output_file << output_tensor.host_view();
}
return !errors;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/threadblock/epilogue_with_reduction_testbed.h/0 | {
"file_path": "test/unit/epilogue/threadblock/epilogue_with_reduction_testbed.h",
"repo_id": "test",
"token_count": 4670
} | 45 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests that the stream-K scheduler covers the entire problem space.
*/
#include "cutlass/cluster_launch.hpp"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "../../common/cutlass_unit_test.h"
// Grids are launched with clusters enabled in these tests,
// so the CTK version must support cluster launching.
#if defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED)
using namespace cute;
using ProblemShape_MNKL = Shape<int, int, int, int>;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel for getting each piece of work for a given block from the scheduler and logging
/// the K iterations visited by the block.
template <
class Scheduler,
class TileShape,
class ClusterShape
>
__global__
void
run_scheduler(int* visit_counters, typename Scheduler::Params params, TileShape tile_shape, ClusterShape cluster_shape, ProblemShape_MNKL problem_shape_mnkl) {
Scheduler scheduler{params};
auto work_tile_info = scheduler.get_current_work();
while (work_tile_info.is_valid()) {
// Increment counters to indicate coverage
auto tile_idx = Scheduler::output_tile_index(params, work_tile_info);
auto offset = tile_idx * params.divmod_tiles_per_output_tile_.divisor + work_tile_info.K_idx;
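// Counters are laid out as one contiguous run of k-tile counters per output tile;
// 'offset' indexes the first k tile covered by this unit of work.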
for (auto i = 0; i < work_tile_info.k_tile_count; ++i) {
// Use atomicAdd because the visit counters are shared by multiple thread blocks.
// While having more than one block increment the same counter indicates failure,
// we need to ensure that this behavior is captured (by having both increments reflected).
atomicAdd(visit_counters + offset + i, 1);
}
bool continue_current = scheduler.continue_current_work(work_tile_info);
if (!continue_current) {
scheduler.advance_to_next_work();
work_tile_info = scheduler.get_current_work();
}
}
}
/// Host-side wrapper for launching the kernel to test the scheduler.
template <
class TileShape,
class ClusterShape,
uint32_t NumMmaWarpGroups = 2
>
bool
test_scheduler(
ProblemShape_MNKL problem_shape_mnkl,
TileShape tile_shape,
ClusterShape cluster_shape,
int sm_count,
int splits=1,
bool expect_data_parallel=false) {
using Scheduler = cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamK<TileShape, ClusterShape>;
cutlass::KernelHardwareInfo hw_info{0, sm_count};
auto params = Scheduler::to_underlying_arguments(problem_shape_mnkl, tile_shape, cluster_shape, hw_info, {splits}, nullptr);
typename Scheduler::Arguments args{};
// Set up the grid for the problem
dim3 grid = Scheduler::get_grid_shape(problem_shape_mnkl, tile_shape, cluster_shape, hw_info, args);
auto print_info = [&]() {
std::cout << "Failed with problem size "
<< size<0>(problem_shape_mnkl) << "x"
<< size<1>(problem_shape_mnkl) << "x"
<< size<2>(problem_shape_mnkl) << "x"
<< size<3>(problem_shape_mnkl)
<< " and grid size " << grid.x << "x"
<< grid.y << "x" << grid.z
<< " splits=" << params.splits_
<< " k_iter=" << params.divmod_tiles_per_output_tile_.divisor
<< " big_units_=" << params.big_units_
<< " big_groups_=" << params.big_groups_
<< " sk_tiles=" << params.sk_tiles_
<< " sk_units=" << params.sk_units_
<< " k_tiles_per_sk_unit=" << params.k_tiles_per_sk_unit_
<< " units_per_problem=" << params.units_per_problem_
<< " groups=" << params.divmod_sk_groups_.divisor << std::endl;
};
// If we expect the schedule to be data-parallel only, ensure that no stream-K tiles are launched.
if (expect_data_parallel && params.sk_tiles_ != 0) {
print_info();
std::cout << "Expected stream-K to select a data-parallel decomposition." << std::endl;
return false;
}
// Allocate counters indicating the number of times each k iteration of each output tile has been visited
auto [blk_m, blk_n, blk_l] = Scheduler::get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape);
auto total_counters = blk_m * blk_n * blk_l * params.divmod_tiles_per_output_tile_.divisor;
cutlass::DeviceAllocation<int> visit_counters(total_counters);
// Initialize counters to zero
cudaError_t err = cudaMemset((void*)visit_counters.get(), 0, sizeof(int) * total_counters);
if (err != cudaSuccess) {
print_info();
std::cout << __FILE__ << ":" << __LINE__ << " cudaMemset failed with error: " << cudaGetErrorString(err) << std::endl;
return false;
}
// Set up cluster and cluster launch. This is needed even for this simple kernel because
// the SM90 scheduler needs to be able to query the CTA id within a cluster, which requires
// explicitly launching with clusters.
dim3 cluster{
static_cast<uint32_t>(cute::get<0>(ClusterShape{})),
static_cast<uint32_t>(cute::get<1>(ClusterShape{})),
static_cast<uint32_t>(cute::get<2>(ClusterShape{}))
};
cudaLaunchConfig_t launch_config;
launch_config.gridDim = grid;
launch_config.blockDim = {1, 1, 1};
launch_config.dynamicSmemBytes = 0;
launch_config.stream = NULL;
cudaLaunchAttribute launch_attribute[1];
launch_attribute[0].id = cudaLaunchAttributeClusterDimension;
launch_attribute[0].val.clusterDim.x = cluster.x;
launch_attribute[0].val.clusterDim.y = cluster.y;
launch_attribute[0].val.clusterDim.z = cluster.z;
launch_config.attrs = launch_attribute;
launch_config.numAttrs = 1;
void const* kernel = (void const*) run_scheduler<Scheduler, TileShape, ClusterShape>;
int* counters_ptr = visit_counters.get();
void* kernel_params[] = {
&counters_ptr,
¶ms,
&tile_shape,
&cluster_shape,
&problem_shape_mnkl
};
// Run the scheduler to completion and log visits to each k iteration
err = cudaLaunchKernelExC(&launch_config, kernel, kernel_params);
if (err != cudaSuccess) {
print_info();
std::cout << __FILE__ << ":" << __LINE__
<< " cudaLaunchKernelExC failed with error: "
<< cudaGetErrorString(err) << std::endl;
return false;
}
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
print_info();
std::cout << __FILE__ << ":" << __LINE__
<< " scheduler kernel failed with error: "
<< cudaGetErrorString(err) << std::endl;
return false;
}
// Copy visit counts back to host and ensure that all entries are ones
std::vector<int> host_visit_counts(total_counters);
visit_counters.copy_to_host(host_visit_counts.data());
for (size_t i = 0; i < host_visit_counts.size(); ++i) {
if (host_visit_counts[i] != 1) {
print_info();
std::cout << "Error at idx: " << i << ". Got count " << host_visit_counts[i] << std::endl;
return false;
}
}
return true;
}
/// Executes tests of the scheduler with a sweep across problem size K
template <
class TileShape,
class ClusterShape
>
bool sweep_k(
ProblemShape_MNKL problem_shape_mnkl,
TileShape tile_shape,
ClusterShape cluster_shape,
int sm_count,
int splits=1,
bool expect_data_parallel=false,
int k_start=128,
int k_stop=16384,
int k_step=0) {
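// A k_step of 0 selects a default stride of four CTA-K tiles per step, keeping the K sweep tractable.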
if (k_step == 0) {
k_step = 4 * cute::size<2>(tile_shape);
}
for (int k = k_start; k <= k_stop; k += k_step) {
ProblemShape_MNKL problem{get<0>(problem_shape_mnkl), get<1>(problem_shape_mnkl), k, get<3>(problem_shape_mnkl)};
bool passed = test_scheduler(problem, tile_shape, cluster_shape, sm_count, splits, expect_data_parallel);
if (!passed) {
return false;
}
}
return true;
}
/// Executes tests of the scheduler that are expected to result in a data-parallel schedule.
/// This function assumes that the problem, tile, and cluster shape, alongside the SM count,
/// are such that the problem executes only full waves on the device.
template <
class TileShape,
class ClusterShape
>
bool test_data_parallel(
int blocks_m,
int blocks_n,
TileShape tile_shape,
ClusterShape cluster_shape,
int sm_count) {
// Since the configuration passed in executes only full waves, increasing
// the batch dimension simply results in running more full waves.
for (int l = 1; l < 4; ++l) {
ProblemShape_MNKL problem_shape{
size<0>(tile_shape) * blocks_m, size<1>(tile_shape) * blocks_n, 1, l};
bool passed = sweep_k(problem_shape, tile_shape, cluster_shape, sm_count, /*splits=*/1, /*expect_data_parallel=*/true);
if (!passed) {
return false;
}
}
return true;
}
/// Executes tests of the scheduler on the generic stream-K decomposition.
template <
class TileShape,
class ClusterShape
>
bool test_stream_k(
TileShape tile_shape,
ClusterShape cluster_shape,
int sm_count) {
int tile_m = size<0>(tile_shape);
int tile_n = size<1>(tile_shape);
for (int m_blocks = 1; m_blocks <= 24; ++m_blocks) {
for (int n_blocks = 1; n_blocks <= 24; ++n_blocks) {
for (int l = 1; l < 4; ++l) {
ProblemShape_MNKL problem{m_blocks * tile_m, n_blocks * tile_n, 1, l};
if (!sweep_k(problem, tile_shape, cluster_shape, sm_count)) {
return false;
}
}
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_stream_k_scheduler, 256x128x64_2x1x1) {
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_1,_1>;
TileShape_MNK tile_shape;
ClusterShape_MNK cluster_shape;
// Test various data-parallel cases
EXPECT_TRUE(test_data_parallel(/*blocks_m=*/ 4, /*blocks_n=*/ 4, tile_shape, cluster_shape, /*sm_count=*/ 16));
EXPECT_TRUE(test_data_parallel(/*blocks_m=*/16, /*blocks_n=*/ 4, tile_shape, cluster_shape, /*sm_count=*/ 64));
EXPECT_TRUE(test_data_parallel(/*blocks_m=*/ 8, /*blocks_n=*/27, tile_shape, cluster_shape, /*sm_count=*/108));
// Test various stream-K cases
EXPECT_TRUE(test_stream_k(tile_shape, cluster_shape, /*sm_count=*/ 16));
EXPECT_TRUE(test_stream_k(tile_shape, cluster_shape, /*sm_count=*/ 64));
EXPECT_TRUE(test_stream_k(tile_shape, cluster_shape, /*sm_count=*/108));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_stream_k_scheduler, 128x128x64_2x1x1) {
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_2,_1,_1>;
TileShape_MNK tile_shape;
ClusterShape_MNK cluster_shape;
EXPECT_TRUE(test_scheduler({128, 512, 2048, 1}, tile_shape, cluster_shape, 114));
}
#endif // defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED)
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/sm90_gemm_stream_k_scheduler.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_stream_k_scheduler.cu",
"repo_id": "test",
"token_count": 4355
} | 46 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#include <iostream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/core_io.h"
#include "testbed.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// List of GEMM internal parameters this testbed supports for user verification
//
enum class ParameterID {
// Threadblock-level parameters
kSmemASize,
kSmemBSize,
// Warp-level parameters
kWarpFragmentASize,
kWarpFragmentBSize,
kWarpFragmentCSize,
kInvalid
};
struct Reference {
ParameterID parameter_id;
union {
int value;
struct {
int m, n, k;
} gemm_shape;
struct {
int row, column;
} matrix_shape;
};
std::string error_msg;
Reference(
ParameterID parameter_id_,
int value_=-1,
std::string const &error_msg_="") : parameter_id(parameter_id_), value(value_), error_msg(error_msg_) {}
};
template <typename Gemm>
struct TestbedSanity {
//
// Type definitions (All Gemm types top down)
//
// Unpacking Gemm types in the following order
// Kernel-level > Threadblock-level > Warp-level > Instruction-level
// kernel-level cutlass Gemm
using GemmKernel = typename Gemm::GemmKernel;
//
// Threadblock-level gemm types
//
using MmaThreadBlock = typename GemmKernel::Mma;
// Threadblock-level gemm shape covering one stage
using ThreadblockShape = typename MmaThreadBlock::Shape;
// Shared memory size covering all stages
using SmemShapeA = typename MmaThreadBlock::Base::SharedStorage::ShapeA;
using SmemPaddingA = typename MmaThreadBlock::Policy::SmemPaddingA;
using SmemShapeB = typename MmaThreadBlock::Base::SharedStorage::ShapeB;
using SmemPaddingB = typename MmaThreadBlock::Policy::SmemPaddingB;
/// Number of stages
static int const kStages = MmaThreadBlock::Base::kStages;
/// Number of warp-level GEMM operations
static int const kWarpGemmIterations = MmaThreadBlock::kWarpGemmIterations;
//
// Warp-level gemm types
//
// Warp-level gemm operator
using MmaWarp = typename MmaThreadBlock::Operator;
// Warp-level gemm shape covering all kgroups
using WarpShape = typename MmaWarp::Shape;
// Warp-level fragments holding operands A & B and destination C
using WarpFragmentA = typename MmaWarp::FragmentA;
using WarpFragmentB = typename MmaWarp::FragmentB;
using WarpFragmentC = typename MmaWarp::FragmentC;
//
// Instruction-level gemm types
//
// Instruction-level gemm operator
using MmaInstruction = typename MmaWarp::Policy::Operator;
// Instruction shape
using InstructionShape = typename MmaInstruction::Shape;
// Instruction-level fragments holding operands A & B and destination C
using InstructionFragmentA = typename MmaInstruction::FragmentA;
using InstructionFragmentB = typename MmaInstruction::FragmentB;
using InstructionFragmentC = typename MmaInstruction::FragmentC;
//
// Testbed types
//
// Vector of values holding user provided reference
using ReferenceVector = std::vector<Reference>;
//
// Data members
//
ReferenceVector references;
//
// Methods
//
TestbedSanity(ReferenceVector const &references_ = ReferenceVector()) : references(references_){ }
// Verify every parameter in the ReferenceVector
bool verify() {
for(auto ref : references)
verify_parameter(ref);
return true;
}
// Verify a single parameter of type Reference
void verify_parameter(Reference const& ref) {
switch(ref.parameter_id) {
case ParameterID::kWarpFragmentASize : EXPECT_TRUE(WarpFragmentA::kElements == ref.value) << *this; break;
case ParameterID::kWarpFragmentBSize : EXPECT_TRUE(WarpFragmentB::kElements == ref.value) << *this; break;
case ParameterID::kWarpFragmentCSize : EXPECT_TRUE(WarpFragmentC::kElements == ref.value) << *this; break;
}
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Overload output operators for TestbedSanity<Gemm>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
std::ostream & operator<<(std::ostream &out, TestbedSanity<Gemm> const &test) {
out << "Gemm internal parameters" << std::endl
<< " Threadblock-level parameters:" << std::endl
<< " ThreadblockShape = " << typename TestbedSanity<Gemm>::ThreadblockShape() << std::endl
<< " kStages = " << TestbedSanity<Gemm>::kStages << std::endl
<< " kWarpGemmIterations = "<< TestbedSanity<Gemm>::kWarpGemmIterations << std::endl
<<" Shared memory sizes:" << std::endl
<<" SmemPaddingA = " << typename TestbedSanity<Gemm>::SmemPaddingA() << std::endl
<<" SmemPaddingB = " << typename TestbedSanity<Gemm>::SmemPaddingB() << std::endl
<<" SmemShapeA = " << typename TestbedSanity<Gemm>::SmemShapeA() << std::endl
<<" SmemShapeB = " << typename TestbedSanity<Gemm>::SmemShapeB() << std::endl
<<" Warp-level parameters" << std::endl
<<" WarpShape = " << typename TestbedSanity<Gemm>::WarpShape() << std::endl
<<" Fragment sizes:" << std::endl
<<" WarpFragmentA::kElements = " << TestbedSanity<Gemm>::WarpFragmentA::kElements << std::endl
<<" WarpFragmentB::kElements = " << TestbedSanity<Gemm>::WarpFragmentB::kElements << std::endl
<<" WarpFragmentC::kElements = " << TestbedSanity<Gemm>::WarpFragmentC::kElements << std::endl
<<" Instruction-level parameters" << std::endl
<<" InstructionShape = " << typename TestbedSanity<Gemm>::InstructionShape() << std::endl
<<" Fragment sizes:" << std::endl
<<" InstructionFragmentA::kElements = " << TestbedSanity<Gemm>::InstructionFragmentA::kElements << std::endl
<<" InstructionFragmentB::kElements = " << TestbedSanity<Gemm>::InstructionFragmentB::kElements << std::endl
<<" InstructionFragmentC::kElements = " << TestbedSanity<Gemm>::InstructionFragmentC::kElements << std::endl;
return out;
}
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_sanity.h/0 | {
"file_path": "test/unit/gemm/device/testbed_sanity.h",
"repo_id": "test",
"token_count": 2831
} | 47 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit test for the PipelineTmaAsync class used in a WarpSpecialized Persistent loop
*/
#define KERNEL_DBG_TRACE false
#include "../common/cutlass_unit_test.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/cluster_sm90.hpp>
#include <cutlass/util/reference/host/gemm.h>
#include <cutlass/cluster_launch.hpp>
#include "cutlass/core_io.h"
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "testbed.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/arch/barrier.h"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/barrier.h"
#include "cutlass/arch/reg_reconfig.h"
using namespace cute;
using namespace cutlass;
//////////////////// KERNEL /////////////////////////
template <uint32_t Stages, typename PingPongBarrier>
struct SharedStorage
{
typename cutlass::PipelineTmaAsync<Stages>::SharedStorage pipeline_storage;
typename PingPongBarrier::SharedStorage pingpong_storage;
};
template <typename ClusterShape, uint32_t Stages>
struct CollectiveSimulation {
using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages>;
using PipelineState = typename cutlass::PipelineState<Stages>;
CUTLASS_DEVICE
static void
dma_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe,
uint32_t const num_iterations) {
uint32_t const per_cta_bytes = sizeof(uint32_t);
int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0);
int lane_predicate = cute::elect_one_sync();
if (warp_idx_in_warpgroup==0 && lane_predicate) {
int tma_k_prologue = min(Stages, num_iterations);
// Simulating Prologue TMA Loads
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < tma_k_prologue; ++i) {
pipeline.producer_acquire(tile_start_state_pipe);
// Simulating cp.async.bulk.tensor behavior
pipeline.producer_commit(tile_start_state_pipe, per_cta_bytes);
++tile_start_state_pipe;
}
int tma_k_iter = num_iterations - tma_k_prologue;
PipelineState wr_pipe = tile_start_state_pipe;
// Simulating Mainloop TMA Loads
CUTE_NO_UNROLL
for ( ; tma_k_iter > 0; --tma_k_iter){
pipeline.producer_acquire(wr_pipe);
// Simulating cp.async.bulk.tensor behavior
pipeline.producer_commit(wr_pipe, per_cta_bytes);
// Advance write stage
++wr_pipe;
}
}
}
CUTLASS_DEVICE
static void
math_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe,
uint32_t const num_iterations, int* data_ptr) {
PipelineState rd_pipe = tile_start_state_pipe;
PipelineState release_pipe = rd_pipe;
// simulates accumulators + extra reg. pressure
int arr[168];
// Init Shared Memory read stages & PhaseBit
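// K_PIPE_MMAS is the number of MMA stages kept in flight before the first consumer_release;
// it must stay below the pipeline depth (see the static_assert below).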
static constexpr uint32_t K_PIPE_MMAS = 1;
static_assert( K_PIPE_MMAS < Stages, "ERROR : Too many MMAs in flight");
// Total number of gemm iterations
auto gemm_k_iterations = num_iterations;
// Simulating Prologue MMAs
int mma_k_prologue = min(K_PIPE_MMAS, gemm_k_iterations);
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < mma_k_prologue; ++iter) {
pipeline.consumer_wait(rd_pipe);
warpgroup_arrive();
// GMMA would typically happen here
++rd_pipe;
}
gemm_k_iterations -= mma_k_prologue;
// Simulating Mainloop MMAs
CUTLASS_PRAGMA_NO_UNROLL
for ( ; gemm_k_iterations > 0; --gemm_k_iterations) {
/// Wait on the rd_pipe stage / phase
pipeline.consumer_wait(rd_pipe);
warpgroup_arrive();
// GMMA would typically happen here
// Dummy op - which will never happen
// But simulates high register usage.
CUTE_UNROLL
for(int i = 0; i < 168; ++i){
if (threadIdx.x > 384){
arr[i] += data_ptr[i];
}
}
pipeline.consumer_release(release_pipe);
// Advance stages
++rd_pipe;
++release_pipe;
}
// Dummy op - which will never happen
CUTE_UNROLL
for(int i = 0; i < 168; ++i){
if (threadIdx.x > 384){
data_ptr[i] = arr[i];
}
}
// Tail Loop
for (int i = 0; i < K_PIPE_MMAS; ++i){
pipeline.consumer_release(release_pipe);
++release_pipe;
}
}
};
struct KernelParams
{
uint32_t num_iterations;
int tiles_per_cluster;
int* data_ptr;
};
// Goal of this kernel is to complete deadlock-free
template <typename ClusterShape, uint32_t Stages>
__launch_bounds__(384, 1)
__global__ static
void pipeline_device(KernelParams params)
{
extern __shared__ char shared_memory[];
using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages>;
using PipelineState = typename cutlass::PipelineState<Stages>;
/* One for Mainloop and one for Epilogue */
constexpr int StagesPerMathWarpGroup = 2;
constexpr int MathWarpGroupCountPersistent = 2;
using PingPongBarrier = typename cutlass::OrderedSequenceBarrier<StagesPerMathWarpGroup, MathWarpGroupCountPersistent>;
using SharedStorage = SharedStorage<Stages, PingPongBarrier>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
[[maybe_unused]] auto cta_layout = Layout<ClusterShape>{}; // (m,n) -> cta_id
int warp_group_idx = __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarpGroup, 0);
int warp_group_thread_idx = threadIdx.x % NumThreadsPerWarpGroup;
dim3 block_id_in_cluster = cute::block_id_in_cluster();
auto cluster_shape = ClusterShape{};
// #Producers = #RowsInCluster + #ColsInCluster - 1
uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1;
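// e.g., a 2x2 cluster yields 2 + 2 - 1 = 3 producers per CTA (the CTAs in its cluster row and
// column, with the CTA itself counted once).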
uint32_t const TmaTransactionBytes = static_cast<uint32_t>(sizeof(uint32_t) * NumProducers);
// mbarrier.init
typename MainloopPipeline::Params pipeline_params;
pipeline_params.transaction_bytes = TmaTransactionBytes;
if (warp_group_idx == 0) {
pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
else {
pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
pipeline_params.is_leader = warp_group_thread_idx == 0;
pipeline_params.num_consumers = NumThreadsPerWarpGroup;
MainloopPipeline pipeline(shared_storage.pipeline_storage, pipeline_params, cluster_shape);
PipelineState tile_start_state_pipe;
int tiles_per_cluster = params.tiles_per_cluster;
/* Offset pipeline start state for Math WG 2 */
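// The two math warp groups ping-pong over persistent tiles: WG 2 starts on the second tile,
// so its read state is advanced by one tile's worth of k iterations.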
if (warp_group_idx == 2) {
// Update pipeline state for next persistent tile
tile_start_state_pipe.advance(params.num_iterations);
tiles_per_cluster--;
}
typename PingPongBarrier::Params pingpong_params;
pingpong_params.group_id = warp_group_idx - 1; // Since DMA Warp Group Idx 0 will not participate
pingpong_params.group_size = NumThreadsPerWarpGroup; // Number of threads / participants in a group
PingPongBarrier math_wg_barrier(shared_storage.pingpong_storage, pingpong_params);
__syncthreads();
// Ensure All CTAs in Cluster have completed init before issuing commits
cute::cluster_arrive_relaxed();
cute::cluster_wait();
// Producer/DMA WarpGroup
if (warp_group_idx == 0) {
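// The producer warp group releases registers (down to 40 per thread) so the math warp
// groups can allocate more (232 per thread below).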
cutlass::arch::warpgroup_reg_dealloc<40>();
// For the DMA (prologue) - we start with an opposite phase - since we skip all waits
// i.e., we know that the buffer is indeed empty
PipelineState tile_prologue_state_pipe = make_producer_start_state<MainloopPipeline>();
while (tiles_per_cluster > 0) {
CollectiveSimulation<ClusterShape,Stages>::dma_wg_simulation(pipeline, tile_prologue_state_pipe, params.num_iterations);
// Update pipeline state for next persistent tile
tile_prologue_state_pipe.advance(params.num_iterations);
tiles_per_cluster--;
}
}
// Math WarpGroups
if(warp_group_idx == 1 || warp_group_idx == 2) {
cutlass::arch::warpgroup_reg_alloc<232>();
while (tiles_per_cluster > 0) {
// MMA
math_wg_barrier.wait();
CollectiveSimulation<ClusterShape,Stages>::math_wg_simulation(pipeline, tile_start_state_pipe, params.num_iterations, params.data_ptr);
math_wg_barrier.arrive();
// Epilogue
math_wg_barrier.wait();
// Simulates long running stage
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
__nanosleep(100000);
#endif
math_wg_barrier.arrive();
// Update pipeline state for next persistent tile
tile_start_state_pipe.advance(params.num_iterations * 2);
tiles_per_cluster -= 2;
}
}
// Makes sure remote SMEM doesn't get destroyed
cute::cluster_arrive_relaxed();
cute::cluster_wait();
}
/////////////////////////////////////////////////////
/// Device NT GMMA + TMA specialized
template<uint32_t Stages_, typename ClusterShape_>
struct PipelineTest {
//
// Data members
//
static constexpr uint32_t Stages = Stages_;
static constexpr uint32_t kBlockSize = 128 * 3;
using ClusterShape = ClusterShape_;
//
// Methods
//
// Run CuTe GEMM kernel
cudaError_t run(uint32_t const kNumIters,
cudaStream_t stream = 0) {
float elapsed_ms = 0.0f;
// Pipeline (multistage pipeline)
auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{};
//
// Configure and launch
//
int iterations = 1;
cudaEvent_t events[2];
cudaError_t result;
for (cudaEvent_t & event : events) {
result = cudaEventCreate(&event);
if (result != cudaSuccess) {
std::cerr << "Error: Failed to create event.";
return result;
}
}
result = cudaEventRecord(events[0]);
if (result != cudaSuccess) {
std::cerr << "Error: Failed to record start event.";
return result;
}
for (int iter = 0; iter < iterations; ++iter) {
constexpr int StagesPerMathWarpGroup = 2;
constexpr int MathWarpGroupCountPersistent = 2;
int smem_size = int(sizeof(SharedStorage<Stages,
typename cutlass::OrderedSequenceBarrier<StagesPerMathWarpGroup, MathWarpGroupCountPersistent>>));
result = cudaFuncSetAttribute(
pipeline_device<decltype(cluster_shape), Stages>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
// Launch a single Cluster, with kBlockSize threads per CTA
dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimBlock(kBlockSize,1,1);
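// Derive a persistent tile count in [1, 10] from the iteration count so different tail
// configurations are exercised across test cases.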
int tiles_per_cluster = (kNumIters % 10) + 1;
printf("Persistent version: Tiles per Cluster = %d\n", tiles_per_cluster);
const void* kernel = (const void*)pipeline_device<decltype(cluster_shape), Stages>;
KernelParams params{kNumIters, tiles_per_cluster, nullptr};
void *kernel_params[] = {¶ms};
cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params);
}
result = cudaEventRecord(events[1]);
if (result != cudaSuccess) {
std::cerr << "Error: Failed to record stop event.";
return result;
}
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl;
return result;
}
result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]);
if (result != cudaSuccess) {
std::cerr << "Failed to create event.";
return result;
}
for (cudaEvent_t & event : events) {
(void)cudaEventDestroy(event);
}
return cudaSuccess;
}
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage10) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 10;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
#endif
| test/unit/pipeline/pipeline_tma_async_warp_specialized_persistent.cu/0 | {
"file_path": "test/unit/pipeline/pipeline_tma_async_warp_specialized_persistent.cu",
"repo_id": "test",
"token_count": 7455
} | 48 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for reduction operation in CUTLASS Library.
*/
#pragma once
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
#include "cutlass/core_io.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class ReductionOperation : public Operation {
public:
using Operator = Operator_;
using ElementWorkspace = typename Operator::ElementWorkspace;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementOutput = typename Operator::ElementOutput;
using ElementCompute = typename Operator::OutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
protected:
///
ReductionDescription description_;
public:
/// Constructor
ReductionOperation(char const *name = "unknown_reduction") {
description_.name = name;
description_.provider = Provider::kCUTLASS;
description_.kind = OperationKind::kReduction;
description_.tile_description.threadblock_shape = make_Coord(Operator::Shape::kRow, Operator::Shape::kColumn, 1);
description_.tile_description.math_instruction.instruction_shape = make_Coord(1, 1, 1);
description_.tile_description.math_instruction.element_accumulator = NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class = OpcodeClassID::kSimt;
description_.tile_description.math_instruction.math_operation = MathOperationID::kAdd;
description_.tile_description.minimum_compute_capability = 50;
description_.tile_description.maximum_compute_capability = 1024;
description_.element_workspace = NumericTypeMap<ElementWorkspace>::kId;
description_.element_output = NumericTypeMap<ElementOutput>::kId;
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
}
/// Returns the description of the Reduction operation
virtual OperationDescription const & description() const {
return description_;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
ReductionConfiguration const *configuration) {
operator_args.problem_size = configuration->problem_size;
operator_args.partitions = configuration->partitions;
operator_args.partition_stride = configuration->partition_stride;
operator_args.workspace = {nullptr, int(configuration->ldw)};
operator_args.source = {nullptr, int(configuration->lds)};
operator_args.destination = {nullptr, int(configuration->ldd)};
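// Only the leading dimensions are captured here; the actual pointers are bound later in
// update_arguments_().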
return Status::kSuccess;
}
/// Updates the arguments structure with the runtime arguments
static Status update_arguments_(
OperatorArguments &operator_args,
ReductionArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::OutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::OutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output = params;
}
else {
return Status::kErrorInvalidProblem;
}
operator_args.workspace.reset(static_cast<ElementWorkspace *>(const_cast<void *>(arguments->workspace)));
operator_args.source.reset(static_cast<ElementOutput *>(const_cast<void *>(arguments->source)));
operator_args.destination.reset(static_cast<ElementOutput *>(const_cast<void *>(arguments->destination)));
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
ReductionConfiguration const *configuration =
static_cast<ReductionConfiguration const *>(configuration_ptr);
ReductionArguments const *arguments =
static_cast<ReductionArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
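// The host workspace only needs to hold the Operator instance, which initialize()
// constructs in place via placement new.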
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<ReductionConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
return Operator::get_workspace_size(args);
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<ReductionConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
//std::cout << "initialize library::Reduction" << std::endl;
//print_operator_args(args);
return op->initialize(args, device_workspace, stream);
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<ReductionArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args, device_workspace);
if (status != Status::kSuccess) {
return status;
}
//std::cout << "run library::Reduction" << std::endl;
//print_operator_args(args);
return op->run(stream);
}
  /// Call print_operator_args from Reduction::initialize()
  /// to dump the arguments passed to the CUTLASS operator for debugging
void print_operator_args(OperatorArguments &operator_args) const {
std::cout << "Reduction::OperatorArguments" << std::endl
<< " problem_size: "
<< operator_args.problem_size << std::endl
<< " partitions: "
<< operator_args.partitions << std::endl
<< " partition_stride: "
<< operator_args.partition_stride << std::endl
<< " epilogue (alpha, beta): "
<< operator_args.output.alpha << ", "
<< operator_args.output.beta << std::endl
<< " workspace (ptr, stride): "
<< operator_args.workspace.data() << ", "
<< operator_args.workspace.stride(0) << std::endl
<< " source (ptr, stride): "
<< operator_args.source.data() << ", "
<< operator_args.source.stride(0) << std::endl
<< " destination (ptr, stride): "
<< operator_args.destination.data() << ", "
<< operator_args.destination.stride(0) << std::endl;
}
};
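// The guarded-out sketch below is illustrative only and is not part of the library: it shows how a
// caller might drive this operation through the virtual interface above (can_implement, workspace
// queries, initialize, run). The configuration and argument structures are assumed to be populated
// by the caller, and error handling is kept minimal.
#if 0
inline Status run_reduction_example(
  Operation const *op,
  ReductionConfiguration const &config,
  ReductionArguments const &args,
  cudaStream_t stream = nullptr) {
  // Reject unsupported problems early
  Status status = op->can_implement(&config, &args);
  if (status != Status::kSuccess) {
    return status;
  }
  // Query and allocate host- and device-side workspaces
  std::vector<uint8_t> host_workspace(op->get_host_workspace_size(&config));
  void *device_workspace = nullptr;
  uint64_t device_bytes = op->get_device_workspace_size(&config, &args);
  if (device_bytes) {
    cudaMalloc(&device_workspace, device_bytes);
  }
  // Initialize once, then run (possibly many times with updated arguments)
  status = op->initialize(&config, host_workspace.data(), device_workspace, stream);
  if (status == Status::kSuccess) {
    status = op->run(&args, host_workspace.data(), device_workspace, stream);
  }
  if (device_workspace) {
    cudaFree(device_workspace);
  }
  return status;
}
#endif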
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/reduction/reduction_operation.h/0 | {
"file_path": "tools/library/src/reduction/reduction_operation.h",
"repo_id": "tools",
"token_count": 3308
} | 49 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief
*/
#pragma once
#include <map>
#include <string>
#include "cutlass/library/library.h"
#include "cutlass/library/util.h"
#include "options.h"
#include "device_allocation.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Collection of allocations on the device
class DeviceContext {
public:
//
// Type definitions
//
using AllocationMap = std::map<std::string, DeviceAllocation *>;
private:
//
// Data members
//
/// Memory allocations that exist (owning)
DeviceAllocationList device_memory_;
/// Non-owning set of named allocations
AllocationMap allocations_;
public:
/// Allocates memory of a given type, capacity (elements), and name
DeviceAllocation *allocate_block(
std::string const &name,
library::NumericTypeID type,
size_t capacity);
/// Allocates memory of a given type, capacity (elements), and name
DeviceAllocation *allocate_tensor(
std::string const &name,
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride = std::vector<int64_t>(),
int batch_count = 1);
/// Allocates memory of a given type, capacity (elements), and name
DeviceAllocation *allocate_tensor(
Options const &options,
std::string const &name,
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count,
int seed_shift = 0);
/// Allocates memory for sparse meta data
DeviceAllocation *allocate_sparsemeta_tensor(
Options const &options,
std::string const &name,
library::NumericTypeID type,
library::LayoutTypeID layout_id,
library::NumericTypeID type_a,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count,
int seed_shift = 0);
/// Clears named allocations (but does not necessarily free memory)
void clear();
/// Frees all device memory allocations
void free();
/// Gets the allocation by name
DeviceAllocation &at(std::string const &name);
size_t size() const;
AllocationMap::iterator begin();
AllocationMap::iterator end();
};
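// A brief usage sketch (guarded out; illustrative only, not part of the profiler). It exercises
// the overload of allocate_tensor() declared above that does not require an Options object; the
// tensor name, extent, and enumerator values are chosen only for illustration.
#if 0
inline void device_context_example() {
  DeviceContext context;
  // Allocate a named 128x256 row-major f32 tensor with a packed (default) stride
  context.allocate_tensor(
    "A",
    library::NumericTypeID::kF32,
    library::LayoutTypeID::kRowMajor,
    {128, 256});
  // Look up the allocation by name
  DeviceAllocation &A = context.at("A");
  (void)A;
  // Release all device memory owned by the context
  context.free();
}
#endif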
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| tools/profiler/include/cutlass/profiler/device_context.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/device_context.h",
"repo_id": "tools",
"token_count": 1232
} | 50 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/rank_k_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
RankKOperationProfiler::RankKOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kRankK,
{
{ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"},
{ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"},
{ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"},
},
{ library::Provider::kCUBLAS}
) {
description_ = " Rank-k Update. D = alpha * A*A^T + beta * C (symmetric) or D = alpha * A*A^H + beta * C (hermitian)";
}
/// Destructor
RankKOperationProfiler::~RankKOperationProfiler() {
}
/// Prints usage statement for the math function
void RankKOperationProfiler::print_usage(std::ostream &out) const {
out << "RankK" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void RankKOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size Syrk kernel:\n"
<< " $ cutlass_profiler --operation=rank_k --blas_mode=symmetric --n=1024 --k=128\n\n"
<< "Profile a particular problem size Herk kernel:\n"
<< " $ cutlass_profiler --operation=rank_k --blas_mode=hermitian --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=rank_k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=rank_k --accumulator-type=f16,f32\n\n"
<< "Schmoo over fill modees:\n"
<< " $ cutlass_profiler --operation=rank_k --fill_mode=lower/upper\n\n"
<< "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=rank_k --A=f16:column or --A=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=rank_k --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=rank_k --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=rank_k --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=rank_k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to rank_k kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=rank_k \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status RankKOperationProfiler::RankKProblem::parse(
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->n), int(this->k)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->n), int(this->n)}).front();
return Status::kSuccess;
}
/// Total number of bytes loaded
int64_t RankKOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const {
  // Input bytes read and output bytes written for the rank-k problem
int64_t bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
// Half matrix including the diagonal will have (N*(N+1))/2 elements
int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
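  // For example, with f32 (4-byte) elements, n=1024, k=128: the two reads of A contribute
  // 2 * 4 * 1024 * 128 = 1,048,576 bytes, and writing the triangular half of C (including
  // the diagonal) contributes 4 * 1024 * 1025 / 2 = 2,099,200 bytes.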
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
  // Output bytes read for the rank-k problem for non-zero beta values
if (!is_beta_zero) {
bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
}
bytes *= batch_count;
return bytes;
}
/// Total number of flops computed
int64_t RankKOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const {
// FLOPs = 2 * n(n+1)k/2 [mma] + 2 * n(n+1)/2 [epilogue]
// FLOPs = n(n+1)(k + 1)
int64_t flops_ = n * (n + 1) * (k + 1);
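  // For example, n=1024, k=128 gives 1024 * 1025 * 129 = 135,398,400 real-valued flops,
  // before the complex-valued multipliers applied below.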
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddGaussianComplex:
flops_ *= 3;
break;
default: break;
}
return flops_;
}
/// Initializes a performance result
void RankKOperationProfiler::RankKProblem::initialize_result(
PerformanceResult &result,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status RankKOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
rank_k_workspace_.configuration.problem_size.m() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.n() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.k() = int(problem_.k);
rank_k_workspace_.configuration.lda = problem_.lda;
rank_k_workspace_.configuration.ldc = problem_.ldc;
rank_k_workspace_.configuration.ldd = problem_.ldc;
//rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices);
rank_k_workspace_.arguments.A = nullptr;
rank_k_workspace_.arguments.C = nullptr;
rank_k_workspace_.arguments.D = nullptr;
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments);
}
/// Initializes the performance result
void RankKOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
result.flops *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
result.flops *= 4;
break;
default: break;
}
}
/// Initializes workspace
Status RankKOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
rank_k_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.n), int(problem_.k)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
rank_k_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)},
1, // batch_count
seed_shift++
);
rank_k_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data());
rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&rank_k_workspace_.configuration,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kRankK;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool RankKOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
  // CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description());
if (cublas_satisfies(rank_k_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool RankKOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::RankKDescription const &rank_k_desc =
static_cast<library::RankKDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublas<t>Syrk()
//
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasRankKDispatcher rank_k_op(
rank_k_desc,
rank_k_workspace_.configuration,
rank_k_workspace_.arguments
);
if (rank_k_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = rank_k_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*rank_k_workspace_.Computed,
*rank_k_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
rank_k_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool RankKOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/rank_k_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/rank_k_operation_profiler.cu",
"repo_id": "tools",
"token_count": 8486
} | 51 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* \brief cuda kernels to transform a device memory tensor from NHWC layout to NCHW layout.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
namespace cutlass {
/** \brief interface to transform a device memory tensor from NHWC layout to NCHW layout.
* \tparam T: data type
*/
template <typename T>
void nhwc_to_nchw(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNCHW> ref_output,
cudaStream_t stream);
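// Implementation note: the kernel below transposes the (H*W) x C matrix of each batch sample in
// 32x32 tiles staged through shared memory; the tile is padded to 32 x (32+1) elements so that
// the strided accesses during the transpose avoid shared-memory bank conflicts.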
template <typename T>
__global__ void nhwc_to_nchw_kernel(T *output,
const T *input,
const int n,
const int h,
const int w,
const int c) {
const int hw = h*w;
const int hwc = hw*c;
__shared__ T shbuf[32 * (32 + 1)];
const int32_t tid = threadIdx.y*blockDim.x + threadIdx.x;
const int32_t wid = tid / 32;
const int32_t lid = tid % 32;
const int32_t ni = blockIdx.z;
const int32_t hwi0 = blockIdx.y * 32;
const int32_t ci0 = blockIdx.x * 32;
const size_t input_idx = ni * hwc + (hwi0 + wid) * c + ci0;
const T *A = input + input_idx;
if (ci0 + lid < c) {
const int lid_x_33 = lid * 33;
if ((hwi0 + 32) <= hw) {
int hwi = wid; // between 0 and 7
CUTLASS_PRAGMA_UNROLL
for (int cLoopIdx = 0; cLoopIdx < 4; cLoopIdx++) {
shbuf[lid_x_33 + hwi] = A[lid];
A = &A[8 * c];
hwi += 8;
}
} else {
for (int hwi = wid; hwi < 32; hwi += 8) {
if ((hwi + hwi0) < hw) {
shbuf[lid_x_33 + hwi] = A[lid];
}
A = &A[8 * c];
}
}
}
__syncthreads();
const int32_t hwiOut = hwi0 + lid;
output = &output[ni * hwc + hwiOut];
if (hwiOut < hw) {
if (ci0 + 32 < c) {
int cI = wid;
CUTLASS_PRAGMA_UNROLL
for (int hwLoopIdx = 0; hwLoopIdx < 4; ++hwLoopIdx) {
output[(ci0 + cI) * hw] = shbuf[(cI)*33 + lid];
cI += 8;
}
} else {
for (int cI = wid; cI < 32; cI += 8) {
if (ci0 + cI < c) {
output[(ci0 + cI) * hw] = shbuf[(cI)*33 + lid];
}
}
}
}
}
template <typename T>
void nhwc_to_nchw(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNCHW> ref_output,
cudaStream_t stream) {
assert(
input_tensor_size.n() == output_tensor_size.n() &&
input_tensor_size.h() == output_tensor_size.c() &&
input_tensor_size.w() == output_tensor_size.h() &&
input_tensor_size.c() == output_tensor_size.w());
int n = input_tensor_size.n();
int h = input_tensor_size.h();
int w = input_tensor_size.w();
int c = input_tensor_size.c();
dim3 grid((c + 31)/32, (h*w + 31)/32, n);
dim3 block(32, 8);
nhwc_to_nchw_kernel<<<grid, block, 0, stream>>>(ref_output.data(), ref_input.data(),
n, h, w, c);
}
} //namespace cutlass
| tools/util/include/cutlass/util/device_nhwc_to_nchw.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_nhwc_to_nchw.h",
"repo_id": "tools",
"token_count": 2295
} | 52 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for convolution in device-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
namespace cutlass {
namespace reference {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Conv2d device reference kernel
////////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2d Fprop kernel - y = fprop(x, w)
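// Implicit-GEMM mapping: GEMM-M indexes the flattened (N, P, Q) output pixels and GEMM-N indexes
// the output channels K. Each thread computes a kThreadM x kThreadN tile of outputs, so with the
// default parameters a threadblock of kCtaShapeM x kCtaShapeN threads covers a 32x32 output tile.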
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv2dFprop(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t npq_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int k_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_p[kThreadM];
int thread_q[kThreadM];
// Compute N, P, Q coordinates for each row of a thread's tile
int64_t PQ = int64_t(problem_size.P) * problem_size.Q;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t npq = npq_start + m;
thread_n[m] = int(npq / PQ);
int64_t residual = npq % PQ;
thread_p[m] = int(residual / problem_size.Q);
thread_q[m] = int(residual % problem_size.Q);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
int c_per_group = problem_size.C / problem_size.groups;
int k_per_group = problem_size.K / problem_size.groups;
// Compute convolution
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int C = 0; C < problem_size.C; ++C) {
        // Get group id of current channel
int c_group_idx = C / c_per_group;
// Load from activations tensor
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int h = thread_p[m] * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = thread_q[m] * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (thread_n[m] < problem_size.N && h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W) {
element_A[m] = ElementAccumulator(tensor_x.at({thread_n[m], h, w, C}));
}
else {
element_A[m] = ElementAccumulator();
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
int k_group_idx = thread_k / k_per_group;
if (thread_k < problem_size.K && k_group_idx == c_group_idx) {
element_B[n] = ElementAccumulator(tensor_w.at({thread_k, R, S, C % c_per_group}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
}
}
}
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N && thread_p[m] < problem_size.P && thread_q[m] < problem_size.Q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
if (thread_k < problem_size.K) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_y_in.at({thread_n[m], thread_p[m], thread_q[m], thread_k}));
}
tensor_y_out.at({thread_n[m], thread_p[m], thread_q[m], thread_k}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
// Conv3d Fprop kernel - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv3dFprop(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t nzpq_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int k_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_z[kThreadM];
int thread_p[kThreadM];
int thread_q[kThreadM];
// Compute N, Z, P, Q coordinates for each row of a thread's tile
int64_t PQ = int64_t(problem_size.P) * problem_size.Q;
int64_t ZPQ = PQ * problem_size.Z;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t nzpq = nzpq_start + m;
thread_n[m] = int(nzpq / ZPQ);
int64_t residual = nzpq % ZPQ;
thread_z[m] = int(residual / PQ);
residual = residual % PQ;
thread_p[m] = int(residual / problem_size.Q);
thread_q[m] = int(residual % problem_size.Q);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int T = 0; T < problem_size.T; ++T) {
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int C = 0; C < problem_size.C; ++C) {
// Load from activations tensor
int filter_t = T;
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - T;
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int d = thread_z[m] * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d;
int h = thread_p[m] * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = thread_q[m] * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (thread_n[m] < problem_size.N &&
d >= 0 && d < problem_size.D &&
h >= 0 && h < problem_size.H &&
w >= 0 && w < problem_size.W) {
element_A[m] = ElementAccumulator(tensor_x.at({thread_n[m], d, h, w, C}));
}
else {
element_A[m] = ElementAccumulator();
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
if (thread_k < problem_size.K) {
element_B[n] = ElementAccumulator(tensor_w.at({thread_k, T, R, S, C}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
} // for (C)
} // for (S)
} // for (R)
} // for (T)
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N &&
thread_z[m] < problem_size.Z &&
thread_p[m] < problem_size.P &&
thread_q[m] < problem_size.Q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
if (thread_k < problem_size.K) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_y_in.at({thread_n[m], thread_z[m], thread_p[m], thread_q[m], thread_k}));
}
tensor_y_out.at({thread_n[m], thread_z[m], thread_p[m], thread_q[m], thread_k}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
} // for (n)
}
} // for (m)
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2d dgrad kernel - dx = dgrad(dy, w)
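// For each input-gradient pixel (n, h, w), the contributing output-gradient positions satisfy
// p * stride_h == h + pad_h - r * dilation_h and q * stride_w == w + pad_w - s * dilation_w;
// a (p, q) term is accumulated only when both quantities are non-negative and divisible by the
// corresponding stride.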
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv2dDgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t nhw_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int c_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_h[kThreadM];
int thread_w[kThreadM];
// Compute N, H, W coordinates for each row of a thread's tile
int64_t HW = int64_t(problem_size.H) * problem_size.W;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t nhw = nhw_start + m;
thread_n[m] = int(nhw / HW);
int64_t residual = nhw % HW;
thread_h[m] = int(residual / problem_size.W);
thread_w[m] = int(residual % problem_size.W);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int K = 0; K < problem_size.K; ++K) {
        // Load from output gradient tensor
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int p = thread_h[m] + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = thread_w[m] + problem_size.pad_w - filter_s * problem_size.dilation_w;
element_A[m] = ElementAccumulator();
if (p >= 0 && !(p % problem_size.stride_h) && q >= 0 && !(q % problem_size.stride_w)) {
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
if (thread_n[m] < problem_size.N && p < problem_size.P && q < problem_size.Q) {
element_A[m] = ElementAccumulator(tensor_dy.at({thread_n[m], p, q, K}));
}
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_w.at({K, R, S, thread_c}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
}
}
}
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N && thread_h[m] < problem_size.H && thread_w[m] < problem_size.W) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dx_in.at({thread_n[m], thread_h[m], thread_w[m], thread_c}));
}
tensor_dx_out.at({thread_n[m], thread_h[m], thread_w[m], thread_c}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
// Conv3d dgrad kernel - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv3dDgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t ndhw_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int c_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_d[kThreadM];
int thread_h[kThreadM];
int thread_w[kThreadM];
  // Compute N, D, H, W coordinates for each row of a thread's tile
int64_t HW = int64_t(problem_size.H) * problem_size.W;
int64_t DHW = HW * problem_size.D;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t ndhw = ndhw_start + m;
thread_n[m] = int(ndhw / DHW);
int64_t residual = ndhw % DHW;
thread_d[m] = int(residual / HW);
residual = residual % HW;
thread_h[m] = int(residual / problem_size.W);
thread_w[m] = int(residual % problem_size.W);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int T = 0; T < problem_size.T; ++T) {
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int K = 0; K < problem_size.K; ++K) {
          // Load from output gradient tensor
int filter_t = T;
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - T;
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int z = thread_d[m] + problem_size.pad_d - filter_t * problem_size.dilation_d;
int p = thread_h[m] + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = thread_w[m] + problem_size.pad_w - filter_s * problem_size.dilation_w;
element_A[m] = ElementAccumulator();
if (z >= 0 && !(z % problem_size.stride_d) &&
p >= 0 && !(p % problem_size.stride_h) &&
q >= 0 && !(q % problem_size.stride_w)) {
z = z / problem_size.stride_d;
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
if (thread_n[m] < problem_size.N && z < problem_size.Z && p < problem_size.P && q < problem_size.Q) {
element_A[m] = ElementAccumulator(tensor_dy.at({thread_n[m], z, p, q, K}));
}
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_w.at({K, T, R, S, thread_c}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
} // for (C)
} // for (S)
} // for (R)
} // for (T)
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N &&
thread_d[m] < problem_size.D &&
thread_h[m] < problem_size.H &&
thread_w[m] < problem_size.W) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dx_in.at({thread_n[m], thread_d[m], thread_h[m], thread_w[m], thread_c}));
}
tensor_dx_out.at({thread_n[m], thread_d[m], thread_h[m], thread_w[m], thread_c}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2d wgrad kernel - dw = wgrad(dy, x)
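// Implicit-GEMM mapping for wgrad: GEMM-M indexes the output channel K, GEMM-N indexes the
// flattened (R, S, C) filter position, and the reduction runs over all (N, P, Q) output pixels.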
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 8, // shape of a threadblock in units of threads
int kCtaShapeN = 16 // shape of a threadblock in units of threads
>
__global__ void Conv2dWgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int k_start = blockIdx.x * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int64_t rsc_start = int64_t(blockIdx.y) * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_r[kThreadN];
int thread_s[kThreadN];
int thread_c[kThreadN];
  // Compute R, S, C coordinates for each column of a thread's tile
int64_t SC = int64_t(problem_size.S) * problem_size.C;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int64_t rsc = rsc_start + n;
int64_t residual = rsc % SC;
thread_r[n] = int(rsc / SC);
thread_s[n] = int(residual / problem_size.C);
thread_c[n] = int(residual % problem_size.C);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int N = 0; N < problem_size.N; ++N) {
for (int P = 0; P < problem_size.P; ++P) {
for (int Q = 0; Q < problem_size.Q; ++Q) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
element_A[m] = ElementAccumulator();
if (thread_k < problem_size.K) {
element_A[m] = ElementAccumulator(tensor_dy.at({N, P, Q, thread_k}));
}
}
        // Load from activations tensor
        CUTLASS_PRAGMA_UNROLL
        for (int n = 0; n < kThreadN; ++n) {
          // Map the filter position to an activation coordinate
int filter_r = thread_r[n];
int filter_s = thread_s[n];
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - filter_r;
filter_s = problem_size.S - 1 - filter_s;
}
int h = P * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = Q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
element_B[n] = ElementAccumulator();
if (h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W && thread_c[n] < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_x.at({N, h, w, thread_c[n]}));
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
}
}
}
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
if (thread_k < problem_size.K) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
if (thread_r[n] < problem_size.R && thread_s[n] < problem_size.S && thread_c[n] < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dw_in.at({thread_k, thread_r[n], thread_s[n], thread_c[n]}));
}
tensor_dw_out.at({thread_k, thread_r[n], thread_s[n], thread_c[n]}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
// Conv3d wgrad kernel - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 8, // shape of a threadblock in units of threads
int kCtaShapeN = 16 // shape of a threadblock in units of threads
>
__global__ void Conv3dWgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int k_start = blockIdx.x * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int64_t trsc_start = int64_t(blockIdx.y) * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_t[kThreadN];
int thread_r[kThreadN];
int thread_s[kThreadN];
int thread_c[kThreadN];
  // Compute T, R, S, C coordinates for each row of a thread's tile
int64_t SC = int64_t(problem_size.S) * problem_size.C;
int64_t RSC = SC * problem_size.R;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int64_t trsc = trsc_start + n;
thread_t[n] = int(trsc / RSC);
int64_t residual = trsc % RSC;
thread_r[n] = int(residual / SC);
residual = residual % SC;
thread_s[n] = int(residual / problem_size.C);
thread_c[n] = int(residual % problem_size.C);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int N = 0; N < problem_size.N; ++N) {
for (int Z = 0; Z < problem_size.Z; ++Z) {
for (int P = 0; P < problem_size.P; ++P) {
for (int Q = 0; Q < problem_size.Q; ++Q) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
element_A[m] = ElementAccumulator();
if (thread_k < problem_size.K) {
element_A[m] = ElementAccumulator(tensor_dy.at({N, Z, P, Q, thread_k}));
}
}
          // Load from activations tensor
          CUTLASS_PRAGMA_UNROLL
          for (int n = 0; n < kThreadN; ++n) {
int filter_t = thread_t[n];
int filter_r = thread_r[n];
int filter_s = thread_s[n];
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - filter_t;
filter_r = problem_size.R - 1 - filter_r;
filter_s = problem_size.S - 1 - filter_s;
}
            int d = Z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d;
int h = P * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = Q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
element_B[n] = ElementAccumulator();
if (d >= 0 && d < problem_size.D &&
h >= 0 && h < problem_size.H &&
w >= 0 && w < problem_size.W &&
thread_c[n] < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_x.at({N, d, h, w, thread_c[n]}));
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
} // for (Q)
} // for (P)
} // for (Z)
} // for (N)
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
if (thread_k < problem_size.K) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
if (thread_t[n] < problem_size.T &&
thread_r[n] < problem_size.R &&
thread_s[n] < problem_size.S &&
thread_c[n] < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dw_in.at({thread_k, thread_t[n], thread_r[n], thread_s[n], thread_c[n]}));
}
tensor_dw_out.at({thread_k, thread_t[n], thread_r[n], thread_s[n], thread_c[n]}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Conv2d Fprop dispatcher - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2dFprop(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 4; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t npq = int64_t(problem_size.N) * problem_size.P * problem_size.Q;
int64_t blocks_m = (npq + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.K + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv2dFprop<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_x,
tensor_w,
tensor_y_in,
tensor_y_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv3d Fprop dispatcher - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3dFprop(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 4; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t nzpq = int64_t(problem_size.N) * problem_size.Z * problem_size.P * problem_size.Q;
int64_t blocks_m = (nzpq + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.K + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv3dFprop<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_x,
tensor_w,
tensor_y_in,
tensor_y_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv2d Dgrad dispatcher - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2dDgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t nhw = int64_t(problem_size.N) * problem_size.H * problem_size.W;
int64_t blocks_m = (nhw + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.C + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv2dDgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_w,
tensor_dx_in,
tensor_dx_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv3d Dgrad dispatcher - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3dDgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t ndhw = int64_t(problem_size.N) * problem_size.D * problem_size.H * problem_size.W;
int64_t blocks_m = (ndhw + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.C + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv3dDgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_w,
tensor_dx_in,
tensor_dx_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv2d Wgrad dispatcher - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2dWgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 8; // shape of a threadblock in units of threads
int const kCtaShapeN = 16; // shape of a threadblock in units of threads
int64_t rsc = int64_t(problem_size.R) * problem_size.S * problem_size.C;
int64_t blocks_n = (rsc + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid((problem_size.K + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM), uint32_t(blocks_n));
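  // Worked example of the launch shape (illustrative, not part of the original source):
  // for K = 64, R = S = 3, C = 128, rsc = 3 * 3 * 128 = 1152, so
  // blocks_n = (1152 + 63) / 64 = 18 and grid = ((64 + 15) / 16, 18) = (4, 18),
  // with each block holding kCtaShapeM x kCtaShapeN = 8 x 16 threads.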
kernel::Conv2dWgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_x,
tensor_dw_in,
tensor_dw_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv3d Wgrad dispatcher - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3dWgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 8; // shape of a threadblock in units of threads
int const kCtaShapeN = 16; // shape of a threadblock in units of threads
int64_t trsc = int64_t(problem_size.T) * problem_size.R * problem_size.S * problem_size.C;
int64_t blocks_n = (trsc + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid((problem_size.K + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM), uint32_t(blocks_n));
kernel::Conv3dWgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_x,
tensor_dw_in,
tensor_dw_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Generic 2D convolution targeting Conv2dFprop, Conv2dDgrad, and Conv2dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2d(
conv::Operator convolutional_operator,
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
return Conv2dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
break;
case conv::Operator::kDgrad:
return Conv2dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
break;
case conv::Operator::kWgrad:
return Conv2dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
break;
default: break;
}
return Status::kErrorNotSupported;
}
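// Illustrative usage sketch (not part of the original header). The element types, layouts,
// problem size, and the device-resident TensorRefs (tensor_A ... tensor_D) below are
// assumptions made for the example; adapt them to your own setup.
//
//   cutlass::conv::Conv2dProblemSize problem_size(
//       {1, 32, 32, 64},             // input size  (N, H, W, C)
//       {128, 3, 3, 64},             // filter size (K, R, S, C)
//       {1, 1, 1, 1},                // padding
//       {1, 1},                      // stride   (stride_h, stride_w)
//       {1, 1});                     // dilation (dilation_h, dilation_w)
//
//   cutlass::Status status = cutlass::reference::device::Conv2d<
//       float, cutlass::layout::TensorNHWC,
//       float, cutlass::layout::TensorNHWC,
//       float, cutlass::layout::TensorNHWC,
//       float
//   >(cutlass::conv::Operator::kFprop, problem_size,
//     tensor_A, tensor_B, tensor_C, tensor_D, 1.0f, 0.0f);
//
//   if (status != cutlass::Status::kSuccess) { /* handle the launch failure */ }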
/// Generic 3D convolution targeting Conv3dFprop, Conv3dDgrad, and Conv3dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3d(
conv::Operator convolutional_operator,
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
return Conv3dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
case conv::Operator::kDgrad:
return Conv3dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
case conv::Operator::kWgrad:
return Conv3dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
default: break;
}
return Status::kErrorNotSupported;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/device/convolution.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/convolution.h",
"repo_id": "tools",
"token_count": 20794
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for convolution in host-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include <iostream>
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Forward propagation
////////////////////////////////////////////////////////////////////////////////////////////////////
/// y = conv2d(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2dFprop(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementD, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
int group_idx = k / (problem_size.K / problem_size.groups);
int channels_per_group = problem_size.C / problem_size.groups;
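          // Worked example of the grouped-convolution indexing below (illustrative, not part
          // of the original source): with K = 64, C = 64, groups = 2, channels_per_group = 32,
          // output channel k = 40 falls in group_idx = 40 / 32 = 1, so it reads activation
          // channels 32..63 while indexing filter channels 0..31 of tensor_w.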
ElementAccumulator acc = ElementAccumulator();
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < channels_per_group; ++c) {
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int h = p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W) {
ElementA a = tensor_x.at({n, h, w, c + group_idx * channels_per_group});
ElementB b = tensor_w.at({k, r, s, c});
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_y_in.at(cutlass::make_Coord(n, p, q, k));
}
tensor_y_out.at(cutlass::make_Coord(n, p, q, k)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
}
}
}
}
}
/// Depthwise-separable convolution
template <typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>>
void Depsep_Fprop(cutlass::TensorView<ElementA, LayoutA> tensor_A,
cutlass::TensorView<ElementB, LayoutB> tensor_B,
cutlass::TensorView<ElementC, LayoutC> tensor_C,
cutlass::TensorView<ElementD, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta,
cutlass::Tensor4DCoord padding = cutlass::Tensor4DCoord(),
cutlass::Coord<2> conv_stride = cutlass::Coord<2>(),
cutlass::Coord<2> dilation = cutlass::Coord<2>(),
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < tensor_C.extent().n(); ++n) {
for (int p = 0; p < tensor_C.extent().h(); ++p) {
for (int q = 0; q < tensor_C.extent().w(); ++q) {
for (int g = 0; g < tensor_C.extent().c(); ++g) {
ElementAccumulator acc = ElementAccumulator();
for (int r = 0; r < tensor_B.extent().h(); ++r) {
for (int s = 0; s < tensor_B.extent().w(); ++s) {
// input activation H and W
int h = p * conv_stride[0] - padding[0] + r * dilation[0];
int w = q * conv_stride[1] - padding[2] + s * dilation[1];
if (h < tensor_A.extent().h() && h >= 0 && w < tensor_A.extent().w() && w >= 0) {
ElementA a = tensor_A.at(cutlass::make_Coord(n, h, w, g));
ElementB b = (mode == cutlass::conv::Mode::kCrossCorrelation)
? tensor_B.at(cutlass::make_Coord(g, r, s, 0))
: tensor_B.at(cutlass::make_Coord(
g, tensor_B.extent().h() - r - 1, tensor_B.extent().w() - s - 1, 0));
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = tensor_C.at(cutlass::make_Coord(n, p, q, g));
tensor_D.at(cutlass::make_Coord(n, p, q, g)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
}
}
}
}
}
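// Illustrative usage sketch (not part of the original header). It assumes float NHWC tensors
// held in cutlass::HostTensor objects (tensor_x, tensor_w, tensor_y) and a depthwise filter
// stored with extent (C, R, S, 1):
//
//   cutlass::reference::host::Depsep_Fprop<
//       float, cutlass::layout::TensorNHWC,
//       float, cutlass::layout::TensorNHWC,
//       float, cutlass::layout::TensorNHWC,
//       float
//   >(tensor_x.host_view(), tensor_w.host_view(),
//     tensor_y.host_view(), tensor_y.host_view(),
//     1.0f, 0.0f,
//     cutlass::Tensor4DCoord(1, 1, 1, 1),   // padding
//     cutlass::make_Coord(1, 1),            // conv_stride
//     cutlass::make_Coord(1, 1));           // dilation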
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Dgrad / Deconv
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2dDgrad(
cutlass::conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementD, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta,
bool is_deconv = false) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int h = 0; h < problem_size.H; ++h) {
for (int w = 0; w < problem_size.W; ++w) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int k = 0; k < problem_size.K; ++k) {
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int p = h + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = w + problem_size.pad_w - filter_s * problem_size.dilation_w;
if (p >= 0 && (p % problem_size.stride_h) == 0 &&
q >= 0 && (q % problem_size.stride_w) == 0) {
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
#if 0
std::cout << "row:"
<< n * problem_size.H * problem_size.W +
h * problem_size.W +
w << " "
<< "n, p, q: ("
<< n << ", "
<< p << ", "
<< q << ") * "
<< "r, s: ("
<< r << ", "
<< s << ") ["
<< ((p < problem_size.P && q < problem_size.Q) ? "true":"false") << "]"
<< std::endl;
#endif
if (p < problem_size.P && q < problem_size.Q) {
ElementA a = tensor_dy.at(cutlass::make_Coord(n, p, q, k));
ElementB b = is_deconv ? tensor_w.at(cutlass::make_Coord(c, r, s, k))
: tensor_w.at(cutlass::make_Coord(k, r, s, c));
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
} // for (K)
} // for (S)
} // for (R)
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dx_in.at(cutlass::make_Coord(n, h, w, c));
}
tensor_dx_out.at(cutlass::make_Coord(n, h, w, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (W)
} // for (H)
} // for (N)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Wgrad
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2dWgrad(
cutlass::conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementD, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta) {
InnerProductOp inner_product_op;
ConvertOp convert_op;
// Apply MMA and accumulate ElementAccumulator
for (int k = 0; k < problem_size.K; ++k) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
cutlass::Tensor4DCoord b_coord;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
b_coord = make_Coord(
n,
p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h,
q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w,
c);
if (b_coord.h() < problem_size.H && b_coord.h() >= 0 &&
b_coord.w() < problem_size.W && b_coord.w() >= 0) {
ElementAccumulator a = ElementAccumulator(tensor_dy.at(cutlass::make_Coord(n, p, q, k)));
ElementAccumulator b = ElementAccumulator(tensor_x.at(b_coord));
acc = inner_product_op(a, b, acc);
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dw_in.at(cutlass::make_Coord(k, r, s, c));
}
tensor_dw_out.at(cutlass::make_Coord(k, r, s, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (S)
} // for (R)
} // for (K)
}
/// Generic 2D convolution targeting Conv2dFprop, Conv2dDgrad, and Conv2dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2d(
conv::Operator convolutional_operator,
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementD, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
Conv2dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ElementD,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
case conv::Operator::kDeconv:
case conv::Operator::kDgrad:
Conv2dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ElementD,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, (convolutional_operator == conv::Operator::kDeconv));
break;
case conv::Operator::kWgrad:
Conv2dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ElementD,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
default:
break;
}
}
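// Illustrative verification sketch (not part of the original header). It assumes
// cutlass::HostTensor objects (tensor_A ... tensor_D, tensor_ref_D), an existing problem_size,
// alpha, beta, and cutlass/util/reference/host/tensor_compare.h; the host reference result in
// tensor_ref_D is compared against a device-computed tensor_D.
//
//   cutlass::reference::host::Conv2d<
//       float, cutlass::layout::TensorNHWC,
//       float, cutlass::layout::TensorNHWC,
//       float, cutlass::layout::TensorNHWC,
//       float
//   >(cutlass::conv::Operator::kFprop, problem_size,
//     tensor_A.host_ref(), tensor_B.host_ref(),
//     tensor_C.host_ref(), tensor_ref_D.host_ref(), alpha, beta);
//
//   tensor_D.sync_host();   // copy the device result back before comparing
//   bool passed = cutlass::reference::host::TensorEquals(
//       tensor_ref_D.host_view(), tensor_D.host_view());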
////////////////////////////////////////////////////////////////////////////////////////////////////
/// 3D convolution
////////////////////////////////////////////////////////////////////////////////////////////////////
/// y = conv3d(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3dFprop(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int z = 0; z < problem_size.Z; ++z) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
ElementAccumulator acc = ElementAccumulator();
for (int t = 0; t < problem_size.T; ++t) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < problem_size.C; ++c) {
int filter_t = t;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - t;
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int d = z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d;
int h = p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (d >= 0 && d < problem_size.D &&
                      h >= 0 && h < problem_size.H &&
w >= 0 && w < problem_size.W) {
ElementA a = tensor_x.at({n, d, h, w, c});
ElementB b = tensor_w.at({k, t, r, s, c});
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_y_in.at(cutlass::make_Coord(n, z, p, q, k));
}
tensor_y_out.at(cutlass::make_Coord(n, z, p, q, k)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Dgrad / Deconv
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3dDgrad(
cutlass::conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta,
bool is_deconv = false) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int d = 0; d < problem_size.D; ++d) {
for (int h = 0; h < problem_size.H; ++h) {
for (int w = 0; w < problem_size.W; ++w) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int t = 0; t < problem_size.T; ++t) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int k = 0; k < problem_size.K; ++k) {
int filter_t = t;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - t;
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int z = d + problem_size.pad_d - filter_t * problem_size.dilation_d;
int p = h + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = w + problem_size.pad_w - filter_s * problem_size.dilation_w;
if (z >= 0 && (z % problem_size.stride_d) == 0 &&
p >= 0 && (p % problem_size.stride_h) == 0 &&
q >= 0 && (q % problem_size.stride_w) == 0) {
z = z / problem_size.stride_d;
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
if (z < problem_size.Z && p < problem_size.P && q < problem_size.Q) {
ElementA a = tensor_dy.at(cutlass::make_Coord(n, z, p, q, k));
ElementB b = is_deconv ? tensor_w.at(cutlass::make_Coord(c, t, r, s, k))
: tensor_w.at(cutlass::make_Coord(k, t, r, s, c));
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
} // for (K)
} // for (S)
} // for (R)
} // for (T)
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dx_in.at(cutlass::make_Coord(n, d, h, w, c));
}
tensor_dx_out.at(cutlass::make_Coord(n, d, h, w, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (W)
} // for (H)
} // for (D)
} // for (N)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Wgrad
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3dWgrad(
cutlass::conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta) {
InnerProductOp inner_product_op;
ConvertOp convert_op;
// Apply MMA and accumulate ElementAccumulator
for (int k = 0; k < problem_size.K; ++k) {
for (int t = 0; t < problem_size.T; ++t) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int n = 0; n < problem_size.N; ++n) {
for (int z = 0; z < problem_size.Z; ++z) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
int filter_t = t;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - t;
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
Tensor5DCoord b_coord = make_Coord(
n,
z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d,
p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h,
q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w,
c);
if (b_coord.d() < problem_size.D && b_coord.d() >= 0 &&
b_coord.h() < problem_size.H && b_coord.h() >= 0 &&
b_coord.w() < problem_size.W && b_coord.w() >= 0) {
ElementAccumulator a = ElementAccumulator(tensor_dy.at(cutlass::make_Coord(n, z, p, q, k)));
ElementAccumulator b = ElementAccumulator(tensor_x.at(b_coord));
acc = inner_product_op(a, b, acc);
}
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dw_in.at(cutlass::make_Coord(k, t, r, s, c));
}
tensor_dw_out.at(cutlass::make_Coord(k, t, r, s, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (S)
} // for (R)
} // for (T)
} // for (K)
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Generic 3D convolution targeting Conv3dFprop, Conv3dDgrad, and Conv3dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3d(
conv::Operator convolutional_operator,
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
Conv3dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
case conv::Operator::kDeconv:
case conv::Operator::kDgrad:
Conv3dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, (convolutional_operator == conv::Operator::kDeconv));
break;
case conv::Operator::kWgrad:
Conv3dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
default:
break;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/convolution.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/convolution.h",
"repo_id": "tools",
"token_count": 13227
} | 54 |