| text (string, lengths 27-947k) | id (string, lengths 18-126) | metadata (dict) | __index_level_0__ (int64, 0-80) |
---|---|---|---|
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations on planar-complex arrays
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/complex.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to arrays of planar-complex elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
/// Note, as with most CUTLASS components for planar complex, the template arguments describe
/// the underlying real data type.
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
                                               ///< but we sometimes use 64 or 32 when there is not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
ScaleType::Kind Scale = ScaleType::Default ///< Control Alpha and Beta scaling
>
class LinearCombinationPlanarComplex {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementScalar = complex<ElementCompute>;
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = ArrayPlanarComplex<ElementOutput, kCount>;
using FragmentAccumulator = ArrayPlanarComplex<ElementAccumulator, kCount>;
using ComputeFragment = ArrayPlanarComplex<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
ElementScalar alpha{ElementCompute(1)}; ///< scales accumulators
ElementScalar beta{ElementCompute(0)}; ///< scales source tensor
ElementScalar const* alpha_ptr{nullptr}; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementScalar const* beta_ptr{nullptr}; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(
ElementScalar alpha,
ElementScalar beta
): alpha(alpha), beta(beta)
{}
CUTLASS_HOST_DEVICE
Params(
ElementScalar const *alpha_ptr,
ElementScalar const *beta_ptr
): alpha_ptr(alpha_ptr), beta_ptr(beta_ptr)
{}
};
private:
//
// Data members
//
ElementScalar alpha_;
ElementScalar beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
  LinearCombinationPlanarComplex(Params const &params) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::OnlyAlphaScaling) return false;
return beta_.real() != ElementCompute(0) || beta_.imag() != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
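  /// For serial split-K reductions, partitions after the first accumulate onto the
  /// partial results already written to D by the preceding partitions, which is why
  /// beta is forced to one for every k-partition other than partition 0.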
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
    // Convert source and accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_source{
source_converter(source.real),
source_converter(source.imag)};
ComputeFragment converted_accumulator{
accumulator_converter(accumulator.real),
accumulator_converter(accumulator.imag)};
multiplies<Array<ElementCompute, kCount> > mul_op;
multiply_add<Array<ElementCompute, kCount> > mul_add_op;
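    // The statements below expand planar-complex arithmetic into real-valued multiplies
    // and multiply-adds using (x + yi) * (u + vi) = (x*u - y*v) + (x*v + y*u)i,
    // first for I = beta * C and then for I += alpha * AB.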
// Perform binary operations
// complex multiply: I = beta * C
ComputeFragment intermediate {
mul_op(beta_.real(), converted_source.real),
mul_op(beta_.real(), converted_source.imag)
};
intermediate.real = mul_add_op(-beta_.imag(), converted_source.imag, intermediate.real);
intermediate.imag = mul_add_op( beta_.imag(), converted_source.real, intermediate.imag);
// complex multiply-add: I = alpha * AB + I
intermediate.real = mul_add_op(alpha_.real(), converted_accumulator.real, intermediate.real);
intermediate.imag = mul_add_op(alpha_.real(), converted_accumulator.imag, intermediate.imag);
intermediate.real = mul_add_op(-alpha_.imag(), converted_accumulator.imag, intermediate.real);
intermediate.imag = mul_add_op( alpha_.imag(), converted_accumulator.real, intermediate.imag);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return FragmentOutput{
destination_converter(intermediate.real),
destination_converter(intermediate.imag)};
}
  /// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_accumulator{
accumulator_converter(accumulator.real),
accumulator_converter(accumulator.imag)};
// Perform binary operations
multiplies<Array<ElementCompute, kCount> > mul_op;
multiply_add<Array<ElementCompute, kCount> > mul_add_op;
// complex multiply-add: I = alpha * AB + I
ComputeFragment intermediate {
mul_op(alpha_.real(), converted_accumulator.real),
mul_op(alpha_.real(), converted_accumulator.imag)
};
intermediate.real = mul_add_op(-alpha_.imag(), converted_accumulator.imag, intermediate.real);
intermediate.imag = mul_add_op( alpha_.imag(), converted_accumulator.real, intermediate.imag);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return FragmentOutput{
destination_converter(intermediate.real),
destination_converter(intermediate.imag)};
}
};
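// Illustrative usage sketch. The element types and the vector width below are
// arbitrary choices for the example, not requirements of this header:
//
//   using Combination = cutlass::epilogue::thread::LinearCombinationPlanarComplex<
//       cutlass::half_t,   // ElementOutput
//       8,                 // Count: elements computed per operation
//       float,             // ElementAccumulator
//       float>;            // ElementCompute
//
//   // alpha = 1 + 0i, beta = 0.5 + 0i as complex<float> scalars
//   Combination::Params params(Combination::ElementScalar(1.0f, 0.0f),
//                              Combination::ElementScalar(0.5f, 0.0f));
//   Combination combination(params);
//
//   // Within an epilogue, planar-complex fragments are then combined as
//   //   Combination::FragmentOutput d = combination(accum_fragment, source_fragment);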
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/epilogue/thread/linear_combination_planar_complex.h/0
|
{
"file_path": "cutlass/include/cutlass/epilogue/thread/linear_combination_planar_complex.h",
"repo_id": "cutlass",
"token_count": 2942
}
| 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator_mixed.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/interleaved_epilogue.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
  /// Indicates whether the epilogue is used for a symmetric kernel
BlasMode BlasMode_ = BlasMode::kGemm
>
struct DefaultEpilogueTensorOpBlas3 {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
static BlasMode const kBlasMode = BlasMode_;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorBlas3<
OutputTileThreadMap,
ElementOutput,
kBlasMode
>;
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
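  // For a 32-bit accumulator this evaluates to MatrixShape<0, 8> (64 / 32 * 4 = 8
  // padding elements per row); for a 16-bit accumulator it is MatrixShape<0, 16>.
  // The extra columns help avoid shared-memory bank conflicts when accumulator
  // tiles are staged through shared memory.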
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
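// Illustrative sketch of how this default is typically consumed. The threadblock
// shape, warp-level MMA type, and output operator named here are placeholders
// supplied by the enclosing GEMM kernel, not definitions from this header:
//
//   using TbShape = cutlass::gemm::GemmShape<128, 128, 32>;   // threadblock tile
//   // WarpMma: a cutlass::gemm::warp::MmaTensorOp instantiation from the mainloop
//   using OutputOp = cutlass::epilogue::thread::LinearCombination<float, 4>;
//
//   using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3<
//       TbShape, WarpMma, /*PartitionsK=*/1, OutputOp, /*ElementsPerAccess=*/4,
//       cutlass::BlasMode::kSymmetric>::Epilogue;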
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h/0
|
{
"file_path": "cutlass/include/cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h",
"repo_id": "cutlass",
"token_count": 2492
}
| 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/numeric_types.h"
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename ElementAccumulator_,
typename ElementOutput_,
typename ThreadBlockShape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
bool ReduceKForA_
>
class EpilogueGemmKReduction {
public:
using ThreadBlockShape = ThreadBlockShape_;
using WarpMmaOperator = WarpMmaOperator_;
using WarpShape = typename WarpMmaOperator::Shape;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// Accumulator element
using ElementAccumulator = ElementAccumulator_;
/// Output element
using ElementOutput = ElementOutput_;
/// Output access size
static int const kElementsPerAccess = 1;
static bool const kReduceKForA = ReduceKForA_;
static int const kThreadBlockSize = kReduceKForA ? ThreadBlockShape::kM : ThreadBlockShape::kN;
static int const kWarpSize = kReduceKForA ? WarpShape::kM : WarpShape::kN;
static int const kIterations = kWarpSize / 8;
using FragmentAccumulator = Array<ElementAccumulator, kIterations>;
private:
int thread_offset_;
ElementOutput* pointer_;
int col_;
public:
/// Constructor
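  /// Lane mapping: col_ = lane_idx % 4 and the base element is
  /// (lane_idx / 4) + col_ * 8, so the 32 lanes of a warp cover 32 consecutive
  /// elements of the warp's reduction segment; operator() then advances in
  /// strides of 32 elements for each additional element owned by a lane.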
CUTLASS_DEVICE
EpilogueGemmKReduction(
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx, ///< Id of thread within warp
int threadblock_offset,
ElementOutput* pointer
)
{
col_ = lane_idx % 4;
thread_offset_ = threadblock_offset * kThreadBlockSize
+ warp_idx * kWarpSize
+ lane_idx / 4 + col_ * 8;
pointer_ = pointer + LongIndex(thread_offset_);
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
int size,
FragmentAccumulator &gemm_k_with_reduction_accumulation,
bool LoadForSerialSplitK
) {
bool guard[kIterations / 4];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
guard[i] = ((thread_offset_ + i * 32) < size);
}
Array<ElementOutput, kIterations / 4> source;
source.clear();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
ElementOutput *source_ptr = reinterpret_cast<ElementOutput *>(&source);
cutlass::arch::global_load<ElementOutput, sizeof(ElementOutput)>(
source_ptr[i],
(void *)(pointer_ + i * 32),
guard[i] && LoadForSerialSplitK);
}
FragmentAccumulator sum = gemm_k_with_reduction_accumulation;
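    // Butterfly reduction: the XOR shuffles with masks 1 and 2 sum the values held
    // by the four lanes that share the same lane_idx / 4 (i.e. differ only in col_),
    // so each of those lanes ends up holding the combined partial sum.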
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations; ++i) {
sum[i] += __shfl_xor_sync(0xffffffff, sum[i], 1);
sum[i] += __shfl_xor_sync(0xffffffff, sum[i], 2);
}
Array<ElementAccumulator, kIterations / 4> intermediate;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
if (col_ == 0) {
intermediate[i] = sum[0 + i * 4];
}
if (col_ == 1) {
intermediate[i] = sum[1 + i * 4];
}
if (col_ == 2) {
intermediate[i] = sum[2 + i * 4];
}
if (col_ == 3) {
intermediate[i] = sum[3 + i * 4];
}
}
NumericArrayConverter<ElementAccumulator, ElementOutput, kIterations / 4> source_converter;
Array<ElementAccumulator, kIterations / 4> converted_source = source_converter(source);
plus<Array<ElementAccumulator, kIterations / 4>> plus_source;
intermediate = plus_source(intermediate, converted_source);
NumericArrayConverter<ElementOutput, ElementAccumulator, kIterations / 4> converter;
Array<ElementOutput, kIterations / 4> result = converter(intermediate);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
cutlass::arch::global_store<ElementOutput, sizeof(ElementOutput)>(result[i],
(void *)(pointer_ + i * 32), guard[i]);
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/epilogue/threadblock/epilogue_gemm_k_reduction.h/0
|
{
"file_path": "cutlass/include/cutlass/epilogue/threadblock/epilogue_gemm_k_reduction.h",
"repo_id": "cutlass",
"token_count": 2738
}
| 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base_streamk.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator without splitk
template <
/// Shape of threadblock tile (concept: GemmShape)
typename Shape_,
/// Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
typename WarpMmaOperator_,
/// Number of partitions of the K dimension
int PartitionsK,
/// Tile iterator reading and writing output tensors
typename OutputTileIterator_,
/// Fragment iterator selecting accumulators
typename AccumulatorFragmentIterator_,
/// Output operator
typename OutputOp_,
/// Number of interleaved k
int InterleavedK>
class InterleavedEpilogue :
public EpilogueBaseStreamK<
Shape_,
PartitionsK,
WarpMmaOperator_,
AccumulatorFragmentIterator_>
{
public:
using BaseStreamK = EpilogueBaseStreamK<
Shape_,
PartitionsK,
WarpMmaOperator_,
AccumulatorFragmentIterator_>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using OutputTileIterator = OutputTileIterator_;
using OutputOp = OutputOp_;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Fragment type used by the accumulator tile's fragment iterator
using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment;
/// Accumulator element
using ElementAccumulator = typename AccumulatorTile::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef =
typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<typename OutputTileIterator::Element,
OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType =
Array<ElementAccumulator, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount =
gemm::GemmShape<Shape::kM / WarpMmaOperator::Shape::kM,
Shape::kN / WarpMmaOperator::Shape::kN, kPartitionsK>;
public:
static_assert(OutputTileIterator::kElementsPerAccess,
"This must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements %
OutputTileIterator::kElementsPerAccess),
"Divisibility");
public:
/// Aspect for when epilogue source is not needed
struct SourceAspectNotNeeded
{
/// Constructor
CUTLASS_DEVICE
SourceAspectNotNeeded()
{}
/// Invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename AccumulatorFragmentIterator::Fragment const &aligned_accum_fragment)
{
OutputAccessType *output_frag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i)
{
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i]);
}
}
};
/// Aspect for when epilogue source is needed
struct SourceAspectNeeded
{
OutputTileIterator source_iterator;
typename OutputTileIterator::Fragment source_fragment;
/// Invoke the output functor over each vector of output
CUTLASS_DEVICE
static void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename AccumulatorFragmentIterator::Fragment const &aligned_accum_fragment,
typename OutputTileIterator::Fragment const &source_fragment)
{
OutputAccessType *output_frag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
OutputAccessType const *source_frag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i)
{
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]);
}
}
/// Constructor
CUTLASS_DEVICE
SourceAspectNeeded(OutputTileIterator source_iterator) :
source_iterator(source_iterator)
{
source_fragment.clear();
}
/// Invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename AccumulatorFragmentIterator::Fragment const &aligned_accum_fragment)
{
// Load addend source fragment from global memory
source_iterator.load(source_fragment);
++source_iterator;
apply_output_operator(output_fragment, output_op, aligned_accum_fragment, source_fragment);
}
};
/// Shared storage allocation needed by the epilogue
struct SharedStorage {};
public:
/// Constructor
CUTLASS_DEVICE
InterleavedEpilogue(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx) ///< Id of thread within warp
:
BaseStreamK(thread_idx)
{}
/// Aggregates the accumulator sets shared by peer blocks in the global workspace,
/// performing epilogue computations, writing to output
CUTLASS_DEVICE
void reduce(
int peer_idx_begin,
int peer_idx_end,
int reduce_fragment_idx,
void *element_workspace,
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
    OutputTileIterator source_iterator) ///< Tile iterator for addend source
{
    // Reduce peer accumulator fragments into one fragment
AccumulatorFragment accum_fragment;
BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace);
// Source-fragment data (zero-initialized for scenarios where the
// output operator allows us to skip loading it from global input)
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
if (output_op.is_source_needed())
{
source_iterator += reduce_fragment_idx;
source_iterator.load(source_fragment);
}
// Compute the output result
typename OutputTileIterator::Fragment output_fragment;
// Apply the output operator
SourceAspectNeeded::apply_output_operator(output_fragment, output_op, accum_fragment, source_fragment);
// Store the final result
destination_iterator += reduce_fragment_idx;
destination_iterator.store(output_fragment);
}
/// Perform the epilogue computations and stream the result to global memory.
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators) ///< Complete warp-level accumulator tile
{
operator()(output_op, destination_iterator, accumulators, SourceAspectNotNeeded());
}
/// Perform the epilogue computations and stream the result to global memory. Implements
/// two alternative codepaths, depending on whether the output op requires addend data to be loaded.
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator ) ///< Tile iterator for addend source
{
if (output_op.is_source_needed())
{
operator()(output_op, destination_iterator, accumulators, SourceAspectNeeded(source_iterator));
}
else
{
operator()(output_op, destination_iterator, accumulators, SourceAspectNotNeeded());
}
}
/// Perform the epilogue computations and stream the result to global memory. Implements a
/// single codepath, regardless of whether the output op requires addend data to be loaded
CUTLASS_DEVICE
void unified(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator ) ///< Tile iterator for addend source
{
if (!output_op.is_source_needed())
{
source_iterator.clear_mask();
__syncthreads(); // Dummy (CUDA 11.0)
}
operator()(output_op, destination_iterator, accumulators, SourceAspectNeeded(source_iterator));
}
/// Streams the result to global memory
template <typename SourceAspect>
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
SourceAspect source)
{
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Convert fragment
//
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment;
source.apply_output_operator(output_fragment, output_op, accum_fragment);
//
// Store the final result
//
destination_iterator.set_iteration_index(iter);
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
};
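// Typical device-side usage within a threadblock-scoped GEMM kernel (sketch; the
// template arguments, iterators, and output operator are provided by the kernel):
//
//   __shared__ typename InterleavedEpilogue<...>::SharedStorage epilogue_storage;
//
//   InterleavedEpilogue<...> epilogue(epilogue_storage, thread_idx, warp_idx, lane_idx);
//
//   // Selects the source/no-source codepath based on output_op.is_source_needed():
//   epilogue(output_op, destination_iterator, accumulators, source_iterator);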
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/epilogue/threadblock/interleaved_epilogue.h/0
|
{
"file_path": "cutlass/include/cutlass/epilogue/threadblock/interleaved_epilogue.h",
"repo_id": "cutlass",
"token_count": 4850
}
| 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief The universal GEMM accommodates streamk, batched strided, and batched array variants.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/limits>
#else
#include <limits>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/cuda_host_adapter.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_universal.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemmKernel_>
class GemmUniversalBase {
public:
using GemmKernel = GemmKernel_;
/// Boolean indicating whether the CudaHostAdapter is enabled
static bool const kEnableCudaHostAdapter = CUTLASS_ENABLE_CUDA_HOST_ADAPTER;
using ThreadblockShape = typename GemmKernel::Mma::Shape;
using ElementA = typename GemmKernel::ElementA;
using LayoutA = typename GemmKernel::LayoutA;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
static ComplexTransform const kTransformA = GemmKernel::kTransformA;
using ElementB = typename GemmKernel::ElementB;
using LayoutB = typename GemmKernel::LayoutB;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
static ComplexTransform const kTransformB = GemmKernel::kTransformB;
using ElementC = typename GemmKernel::ElementC;
using LayoutC = typename GemmKernel::LayoutC;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
/// Numerical accumulation element type
using ElementAccumulator = typename GemmKernel::Mma::ElementC;
using EpilogueOutputOp = typename GemmKernel::EpilogueOutputOp;
using ThreadblockSwizzle = typename GemmKernel::ThreadblockSwizzle;
using Operator = typename GemmKernel::Operator;
/// Argument structure
using Arguments = typename GemmKernel::Arguments;
/// Index of the GEMM Kernel within the CudaHostAdapter
static int32_t const kGemmKernelIndex = 0;
/// Kernel dynamic shared memory allocation requirement
/// Update the kernel function's shared memory configuration for the current device
static constexpr size_t kSharedStorageSize = sizeof(typename GemmKernel::SharedStorage);
protected:
//
// Device properties (uniform across all instances of the current thread)
//
// Device ordinal
CUTLASS_THREAD_LOCAL static int device_ordinal_;
/// Device SM count
CUTLASS_THREAD_LOCAL static int device_sms_;
/// Kernel SM occupancy (in thread blocks)
CUTLASS_THREAD_LOCAL static int sm_occupancy_;
protected:
/// Initialize static thread-local members for the thread's current device,
/// if necessary.
static Status init_device_props()
{
CUTLASS_TRACE_HOST("GemmUniversalBase::init_device_props()");
cudaError_t cudart_result;
// Get current device ordinal
int current_ordinal;
    cudart_result = cudaGetDevice(&current_ordinal);
if (cudart_result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaGetDevice() returned error " << cudaGetErrorString(cudart_result));
return Status::kErrorInternal;
}
// Done if matches the current static member
if (current_ordinal == device_ordinal_) {
// Already initialized
return Status::kSuccess;
}
// Update SM count member
cudart_result = cudaDeviceGetAttribute (&device_sms_, cudaDevAttrMultiProcessorCount, current_ordinal);
if (cudart_result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaDeviceGetAttribute() returned error " << cudaGetErrorString(cudart_result));
return Status::kErrorInternal;
}
// If requires more than 48KB: configure for extended, dynamic shared memory
if constexpr (kSharedStorageSize >= (48 << 10))
{
cudart_result = cudaFuncSetAttribute(
Kernel2<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
kSharedStorageSize);
if (cudart_result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaFuncSetAttribute() returned error " << cudaGetErrorString(cudart_result));
return Status::kErrorInternal;
}
}
// Update SM occupancy member
cudart_result = cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
&sm_occupancy_,
Kernel2<GemmKernel>,
GemmKernel::kThreadCount,
kSharedStorageSize,
cudaOccupancyDisableCachingOverride);
if (cudart_result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags() returned error " << cudaGetErrorString(cudart_result));
return Status::kErrorInternal;
}
// Update device ordinal member on success
device_ordinal_ = current_ordinal;
CUTLASS_TRACE_HOST(" "
"device_ordinal: (" << device_ordinal_ << "), "
"device_sms: (" << device_sms_ << "), "
"sm_occupancy: (" << sm_occupancy_ << ") "
"smem_size: (" << kSharedStorageSize << ") "
"GemmKernel::kThreadCount: (" << GemmKernel::kThreadCount << ")");
return Status::kSuccess;
}
protected:
//
// Instance data members
//
/// Kernel parameters
typename GemmKernel::Params params_;
/// Initialize params member
Status init_params(Arguments const &args, CudaHostAdapter *cuda_adapter = nullptr)
{
int32_t device_sms = 0;
int32_t sm_occupancy = 0;
if constexpr (kEnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
//
// Occupancy query using CudaHostAdapter::query_occupancy().
//
if (cuda_adapter) {
Status status = cuda_adapter->query_occupancy(
&device_sms,
&sm_occupancy,
kGemmKernelIndex,
GemmKernel::kThreadCount,
kSharedStorageSize);
CUTLASS_ASSERT(status == Status::kSuccess);
if (status != Status::kSuccess) {
return status;
}
}
else {
return Status::kErrorInternal;
}
}
else {
CUTLASS_ASSERT(cuda_adapter == nullptr);
// Initialize static device properties, if necessary
Status result = init_device_props();
if (result != Status::kSuccess) {
return result;
}
//
// Use thread-local static members for occupancy query initialized by call to
// `init_device_props()`
//
device_sms = device_sms_;
sm_occupancy = sm_occupancy_;
}
// Initialize params member
params_ = typename GemmKernel::Params(args, device_sms, sm_occupancy);
return Status::kSuccess;
}
public:
//---------------------------------------------------------------------------------------------
// Stateless API
//---------------------------------------------------------------------------------------------
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args, CudaHostAdapter *cuda_adapter = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::can_implement()");
dim3 grid = get_grid_shape(args, cuda_adapter);
if (!(grid.y <= std::numeric_limits<uint16_t>::max() &&
grid.z <= std::numeric_limits<uint16_t>::max()))
{
return Status::kErrorInvalidProblem;
}
return GemmKernel::can_implement(args);
}
/// Returns the workspace size (in bytes) needed for the problem
/// geometry expressed by these arguments
static size_t get_workspace_size(Arguments const &args, CudaHostAdapter *cuda_adapter = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::get_workspace_size()");
// Initialize parameters from args
GemmUniversalBase base;
if (base.init_params(args, cuda_adapter) != Status::kSuccess) {
return 0;
}
// Get size from parameters
size_t workspace_bytes = base.params_.get_workspace_size();
CUTLASS_TRACE_HOST(" workspace_bytes: " << workspace_bytes);
return workspace_bytes;
}
/// Returns the grid extents in thread blocks to launch
static dim3 get_grid_shape(Arguments const &args, CudaHostAdapter *cuda_adapter = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::get_grid_shape()");
// Initialize parameters from args
GemmUniversalBase base;
if (base.init_params(args, cuda_adapter) != Status::kSuccess) {
return dim3(0,0,0);
}
// Get dims from parameters
dim3 grid_dims = base.params_.get_grid_dims();
CUTLASS_TRACE_HOST(
" tiled_shape: " << base.params_.get_tiled_shape() << "\n"
<< " grid_dims: {" << grid_dims << "}");
return grid_dims;
}
/// Returns the maximum number of active thread blocks per multiprocessor
static int maximum_active_blocks(CudaHostAdapter *cuda_adapter = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::maximum_active_blocks()");
int32_t device_sms = 0;
int32_t sm_occupancy = 0;
if constexpr (kEnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
Status status = cuda_adapter->query_occupancy(
&device_sms,
&sm_occupancy,
kGemmKernelIndex,
GemmKernel::kThreadCount,
kSharedStorageSize);
CUTLASS_ASSERT(status == Status::kSuccess);
if (status != Status::kSuccess) {
return -1;
}
}
else {
return -1;
}
}
else {
CUTLASS_ASSERT(cuda_adapter == nullptr);
// Initialize static device properties, if necessary
if (init_device_props() != Status::kSuccess) {
return -1;
}
sm_occupancy = sm_occupancy_;
}
CUTLASS_TRACE_HOST(" max_active_blocks: " << sm_occupancy_);
return sm_occupancy;
}
//---------------------------------------------------------------------------------------------
// Stateful API
//---------------------------------------------------------------------------------------------
/// Initializes GEMM state from arguments and workspace memory
Status initialize(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::initialize() - workspace "
<< workspace << ", stream: " << (stream ? "non-null" : "null"));
// Initialize parameters from args
Status result = init_params(args, cuda_adapter);
if (result != Status::kSuccess) {
return result;
}
// Assign and prepare workspace memory
if (args.mode == GemmUniversalMode::kGemm) {
return params_.init_workspace(workspace, stream);
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments.
Status update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalBase()::update()");
params_.update(args);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::run()");
// Configure grid and block dimensions
dim3 block(GemmKernel::kThreadCount, 1, 1);
dim3 grid = params_.get_grid_dims();
// Launch kernel
CUTLASS_TRACE_HOST(" "
"grid: (" << grid << "), "
"block: (" << block << "), "
"SMEM: (" << kSharedStorageSize << ")");
if constexpr (kEnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
        void* kernel_params[] = {&params_};
return cuda_adapter->launch(grid, block, kSharedStorageSize, stream, kernel_params, 0);
}
else {
return Status::kErrorInternal;
}
}
else {
CUTLASS_ASSERT(cuda_adapter == nullptr);
Kernel2<GemmKernel><<<grid, block, kSharedStorageSize, stream>>>(params_);
// Query for errors
cudaError_t result = cudaGetLastError();
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" grid launch failed with error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr)
{
return run(stream, cuda_adapter);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr)
{
Status status = initialize(args, workspace, stream, cuda_adapter);
if (status == Status::kSuccess) {
status = run(stream, cuda_adapter);
}
return status;
}
};
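// Typical host-side usage sketch. `MyGemmKernel` stands in for a kernel type defined
// elsewhere (for example, via cutlass::gemm::kernel::DefaultGemmUniversal), and
// `make_arguments()` is a placeholder for populating the argument structure:
//
//   using Gemm = cutlass::gemm::device::GemmUniversalBase<MyGemmKernel>;
//   Gemm gemm_op;
//
//   typename Gemm::Arguments args = make_arguments();
//   size_t workspace_bytes = Gemm::get_workspace_size(args);
//   // ... allocate a device buffer `workspace` of workspace_bytes ...
//
//   cutlass::Status status = gemm_op(args, workspace, stream);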
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Static initializers
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Device ordinal
template <typename GemmKernel_>
CUTLASS_THREAD_LOCAL int GemmUniversalBase<GemmKernel_>::device_ordinal_ = -1;
/// Device SM count
template <typename GemmKernel_>
CUTLASS_THREAD_LOCAL int GemmUniversalBase<GemmKernel_>::device_sms_ = -1;
/// Kernel SM occupancy (in thread blocks)
template <typename GemmKernel_>
CUTLASS_THREAD_LOCAL int GemmUniversalBase<GemmKernel_>::sm_occupancy_ = -1;
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/gemm/device/gemm_universal_base.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/device/gemm_universal_base.h",
"repo_id": "cutlass",
"token_count": 5423
}
| 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/barrier.h"
#include "cutlass/block_striped.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock mapping function
>
struct GemmUniversalStreamk {
public:
//
// Types and constants
//
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
/// The per-thread tile of raw accumulators
using AccumulatorTile = typename Mma::FragmentC;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Workspace bytes per thread block
static size_t const kWorkspaceBytesPerBlock =
__NV_STD_MAX(
kThreadCount * sizeof(AccumulatorTile),
Epilogue::kWorkspaceBytesPerBlock);
/// Block-striped reduction utility
using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode = GemmUniversalMode::kGemm;
GemmCoord problem_size {};
int batch_count {1}; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor
typename EpilogueOutputOp::Params epilogue{};
void const * ptr_A = nullptr;
void const * ptr_B = nullptr;
void const * ptr_C = nullptr;
void * ptr_D = nullptr;
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
typename LayoutA::Stride stride_a{0};
typename LayoutB::Stride stride_b{0};
typename LayoutC::Stride stride_c{0};
typename LayoutC::Stride stride_d{0};
typename LayoutA::Stride::LongIndex lda{0};
typename LayoutB::Stride::LongIndex ldb{0};
typename LayoutC::Stride::LongIndex ldc{0};
typename LayoutC::Stride::LongIndex ldd{0};
int avail_sms{-1}; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
//
// Methods
//
/// Default Constructor
Arguments() = default;
/// Constructor
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K)
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride stride_a,
typename LayoutB::Stride stride_b,
typename LayoutC::Stride stride_c,
typename LayoutC::Stride stride_d,
int avail_sms = -1 /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
):
mode(mode),
problem_size(problem_size),
batch_count(batch_split),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d), avail_sms(avail_sms)
{
CUTLASS_TRACE_HOST("GemmUniversalStreamk::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Constructor
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K)
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
int avail_sms = -1 /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
):
mode(mode),
problem_size(problem_size),
batch_count(batch_split),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), avail_sms(avail_sms)
{
stride_a = make_Coord(lda);
stride_b = make_Coord(ldb);
stride_c = make_Coord(ldc);
stride_d = make_Coord(ldd);
CUTLASS_TRACE_HOST("GemmUniversalStreamk::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const
{
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.stride_a, args.stride_b);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
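// Illustrative host-side construction of Arguments (a sketch only; "GemmKernel" and the
// {alpha, beta} epilogue-parameter form are assumptions for the example, not part of this header):
//
//   typename GemmKernel::Arguments args(
//     cutlass::gemm::GemmUniversalMode::kGemm,
//     {M, N, K},                      // problem size
//     1,                              // batch_split: 1 lets the Stream-K heuristic pick the split
//     {alpha, beta},                  // EpilogueOutputOp::Params
//     ptr_A, ptr_B, ptr_C, ptr_D,     // operand pointers
//     0, 0, 0, 0,                     // batch strides (unused when mode == kGemm)
//     lda, ldb, ldc, ldd);            // leading dimensions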
/// Parameters structure
struct Params
{
public:
//
// Data members
//
void * ptr_A = nullptr;
void * ptr_B = nullptr;
typename Mma::IteratorA::Params params_A{};
typename Mma::IteratorB::Params params_B{};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
GemmUniversalMode mode = GemmUniversalMode::kGemm;
ThreadblockSwizzle block_mapping{};
void *barrier_workspace = nullptr;
void *partials_workspace = nullptr;
typename EpilogueOutputOp::Params output_op{};
void * ptr_D = nullptr;
void * ptr_C = nullptr;
typename Epilogue::OutputTileIterator::Params params_D{};
typename Epilogue::OutputTileIterator::Params params_C{};
int64_t batch_stride_D{0};
int64_t batch_stride_C{0};
protected:
//
// Host-only dispatch-utilities
//
/// Pad the given allocation size up to the nearest cache line
static size_t cacheline_align_up(size_t size)
{
static const int CACHELINE_SIZE = 128;
return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE;
}
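// Worked example (illustrative): cacheline_align_up(130) = ((130 + 127) / 128) * 128 = 256,
// while cacheline_align_up(128) = 128 and cacheline_align_up(0) = 0.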
/// Get the workspace size needed for barrier
size_t get_barrier_workspace_size() const
{
// For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction,
// each reduction block needs its own synchronization flag.
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks);
return cacheline_align_up(sizeof(typename Barrier::T) * num_flags);
}
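// Illustrative sizing: with 8 SK regions of 4 SK blocks each and 16 reduction blocks,
// num_flags = max(32, 16) = 32, so the barrier workspace holds 32 flags (rounded up to a cache line).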
/// Get the workspace size needed for intermediate partial sums
size_t get_partials_workspace_size() const
{
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks);
}
public:
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
output_op(args.epilogue),
mode(args.mode),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
barrier_workspace(nullptr),
partials_workspace(nullptr)
{
// Number of SMs to make available for StreamK decomposition
int avail_sms = (args.avail_sms == -1) ?
device_sms :
fast_min(args.avail_sms, device_sms);
// Initialize the block mapping structure
block_mapping = ThreadblockSwizzle(
args.mode,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count,
sm_occupancy,
device_sms,
avail_sms,
sizeof(ElementA),
sizeof(ElementB),
sizeof(ElementC),
Epilogue::kAccumulatorFragments);
}
/// Returns the workspace size (in bytes) needed for these parameters
size_t get_workspace_size() const
{
return
get_barrier_workspace_size() +
get_partials_workspace_size();
}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
uint8_t *ptr = static_cast<uint8_t*>(workspace);
// Establish partials workspace
partials_workspace = nullptr;
size_t partials_workspace_bytes = get_partials_workspace_size();
if (partials_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
partials_workspace = ptr;
ptr += partials_workspace_bytes;
}
// Establish barrier workspace
barrier_workspace = nullptr;
size_t barrier_workspace_bytes = get_barrier_workspace_size();
if (barrier_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
barrier_workspace = ptr;
ptr += barrier_workspace_bytes;
}
// Zero-initialize barrier workspace
if (barrier_workspace)
{
size_t barrier_workspace_bytes = get_barrier_workspace_size();
CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes");
cudaError_t result = cudaMemsetAsync(
barrier_workspace,
0,
barrier_workspace_bytes,
stream);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
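// Resulting device workspace layout (each segment independently padded up to a cache line):
//   [ partials: kWorkspaceBytesPerBlock * sk_blocks | barriers: sizeof(Barrier::T) * num_flags ]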
/// Returns the GEMM volume in thread block tiles
cutlass::gemm::GemmCoord get_tiled_shape() const
{
return block_mapping.tiled_shape();
}
/// Returns the total number of thread blocks to launch
int get_grid_blocks() const
{
dim3 grid_dims = get_grid_dims();
return grid_dims.x * grid_dims.y * grid_dims.z;
}
/// Returns the grid extents in thread blocks to launch
dim3 get_grid_dims() const
{
return block_mapping.get_grid_dims();
}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalStreamK::Params::update()");
// Update input/output pointers
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
batch_stride_D = args.batch_stride_D;
output_op = args.epilogue;
}
};
/// Tile work descriptor
struct TileWorkDesc
{
/// The linear tile index
int tile_idx;
/// The location of this tile (in threadblock-tile coordinates) in the output matrix
cutlass::gemm::GemmCoord tiled_coord;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
int iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_begin;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_end;
/// The number of remaining MAC-iterations this threadblock will perform for this tile
int k_iters_remaining;
// Whether this block will perform the first iteration of this tile
CUTLASS_DEVICE
bool tile_started()
{
return (k_begin == 0);
}
// Whether this block will perform the last iteration of this tile
CUTLASS_DEVICE
bool tile_finished(Params const ¶ms)
{
return (k_end == params.block_mapping.problem_size.k());
}
};
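// Example (illustrative): with Mma::Shape::kK = 32 and problem k() = 96 (3 MAC-iterations per tile),
// an SK block assigned tile-local iterations [1, 3) sees k_begin = 32, k_end = 96, and
// k_iters_remaining = 2; for that block tile_started() is false and tile_finished() is true.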
/// Shared memory storage structure
union SharedStorage
{
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
protected:
//
// Data members
//
/// GEMM problem parameters
Params params;
/// Shared storage reference
SharedStorage &shared_storage;
/// ID within the threadblock
int thread_idx;
/// ID of warp
int warp_idx;
/// ID of each thread within a warp
int lane_idx;
/// Threadblock scoped epilogue
Epilogue epilogue;
public:
//
// Host-only dispatch API
//
/// Determines whether the GEMM problem size satisfies this kernel's
/// alignment requirements
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size)
{
CUTLASS_TRACE_HOST("GemmUniversalStreamk::can_implement()");
static int const kAlignmentA = (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<LayoutB,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutB,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
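// Illustrative note: with a row-major A of half_t and 128-bit vectorized accesses,
// Mma::IteratorA::AccessType::kElements is typically 8, so problem_size.k() must be a
// multiple of 8 for the check above to pass.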
/// Determines whether the GEMM problem satisfies this kernel's
/// alignment requirements
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
protected:
//
// Device-only utility methods
//
/// Iterator for fetching tile fragments from A
CUTLASS_DEVICE
typename Mma::IteratorA init_iterator_A(
TileWorkDesc &tile_work,
GemmUniversalMode mode)
{
// The input A matrix
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
// Update input pointers based on batched/array mode
if (mode == GemmUniversalMode::kBatched) {
ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A;
}
if (mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()];
}
int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM;
int m_end = params.block_mapping.problem_size.m();
return typename Mma::IteratorA(
params.params_A,
ptr_A,
{ m_end, tile_work.k_end },
threadIdx.x,
{ m_begin, tile_work.k_begin });
}
/// Iterator for fetching tile fragments from B
CUTLASS_DEVICE
typename Mma::IteratorB init_iterator_B(
TileWorkDesc &tile_work,
GemmUniversalMode mode)
{
// The input B matrix
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
// Update input pointers based on batched/array mode
if (mode == GemmUniversalMode::kBatched) {
ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B;
}
if (mode == GemmUniversalMode::kArray) {
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()];
}
int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN;
int n_end = params.block_mapping.problem_size.n();
return typename Mma::IteratorB(
params.params_B,
ptr_B,
{ tile_work.k_end, n_end },
threadIdx.x,
{ tile_work.k_begin, n_begin });
}
CUTLASS_DEVICE
void init_dp_tile_work(
TileWorkDesc &tile_work,
int tile_idx)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile();
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = params.block_mapping.iters_per_tile();
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = 0;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = params.block_mapping.problem_size.k();
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
CUTLASS_DEVICE
void init_sk_tile_work(
TileWorkDesc &tile_work,
int tile_idx,
int block_iter_begin,
int block_iter_end)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration for this tile
int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile();
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = max(block_iter_begin, tile_iter_begin);
// The first tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_begin = tile_work.iter_begin - tile_iter_begin;
// The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_end = block_iter_end - tile_iter_begin;
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = k_iter_end - k_iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = k_iter_begin * Mma::Shape::kK;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = min(
params.block_mapping.problem_size.k(), // extent of k domain
(k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
/// Share accumulators with peers
CUTLASS_DEVICE
void share_accumulators(
AccumulatorTile const &accumulator_tile,
int block_idx,
int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
int accum_tile_offset = first_block_idx * kThreadCount;
if (block_idx == first_block_idx)
{
// First peer initializes the workspace partials
BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
else
{
// Subsequent peers atomically accumulate into the workspace partials
if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic)
{
// Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them
Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1);
}
else
{
// Turnstile reduction order: wait until the previous peer has written
int wait_count = block_idx - first_block_idx;
Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count);
}
// Perform reduction in workspace
BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
// Signal our arrival
Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx);
}
/// Acquire accumulators from peers
CUTLASS_DEVICE
void acquire_accumulators(
AccumulatorTile &accumulator_tile,
int block_idx,
int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
// Wait for arrival
int num_carry_in = block_idx - first_block_idx;
Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in);
// Load and add peer-partials accumulator tile to local accumulator tile
int accum_tile_offset = first_block_idx * kThreadCount;
BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx);
}
/// Perform epilogue computations and output
CUTLASS_DEVICE
void do_epilogue(
TileWorkDesc &tile_work,
AccumulatorTile &accumulator_tile)
{
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
// Update pointers for batched/array mode(s)
if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += tile_work.tiled_coord.k() * params.batch_stride_C;
ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D;
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[tile_work.tiled_coord.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tile_work.tiled_coord.k()];
}
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tile_work.tiled_coord.m() * Mma::Shape::kM,
tile_work.tiled_coord.n() * Mma::Shape::kN
);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Execute the epilogue operator to update the destination tensor.
epilogue(
EpilogueOutputOp(params.output_op),
iterator_D,
accumulator_tile,
iterator_C);
}
CUTLASS_DEVICE
void separate_reduction(int reduce_idx)
{
int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx;
// Reduce by sk-tile (every tile contributed to by one or more blocks)
reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments;
reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments;
int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile();
int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1;
peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first);
peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last);
// Wait for peers to complete
int peer_idx_end = peer_idx_last + 1;
int num_peers = peer_idx_end - peer_idx_begin;
Barrier::wait_eq_reset(
params.barrier_workspace,
thread_idx,
(reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx,
num_peers);
/// The location of this tile (in threadblock-tile coordinates) in the output matrix
GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx);
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tiled_coord.m() * Mma::Shape::kM,
tiled_coord.n() * Mma::Shape::kN
);
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Execute the epilogue operator to update the destination tensor.
epilogue.reduce(
peer_idx_begin,
peer_idx_end,
reduce_fragment_idx,
params.partials_workspace,
EpilogueOutputOp(params.output_op),
iterator_D,
iterator_C);
}
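// Note: under the mixed ("separate") reduction strategy, the barrier workspace holds one flag per
// (SK-tile, accumulator-fragment) pair; producing SK blocks arrive on every fragment flag of their
// tile, and each reduction block waits on the single flag for its (tile, fragment) assignment.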
CUTLASS_DEVICE
void process_tile(
TileWorkDesc tile_work,
int block_idx,
int dp_start_block_idx,
int block_iter_begin)
{
// Initialize input iterators
typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode);
typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode);
// Initialize accumulators
AccumulatorTile accumulator_tile;
accumulator_tile.clear();
// Initialize MMA abstraction
Mma mma(
shared_storage.main_loop,
thread_idx,
warp_idx,
lane_idx);
// Perform this tile's range of multiply-accumulate (MAC) iterations
mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile);
if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) ||
(params.block_mapping.reduction_blocks == 0) ||
(block_idx >= dp_start_block_idx))
{
//
// Cooperative SK peer reduction or DP block
//
int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx);
if (!tile_work.tile_finished(params)) {
// Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace
share_accumulators(accumulator_tile, block_idx, first_block_idx);
}
else
{
// DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile
if (!tile_work.tile_started())
{
// A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks
acquire_accumulators(accumulator_tile, block_idx, first_block_idx);
}
do_epilogue(tile_work, accumulator_tile);
}
}
else
{
//
// Separate peer reduction
//
// Share accumulator partial sums with peer threadblock(s) through scratch workspace
epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started());
// Signal arrival
Barrier::arrive_range_inc(
params.barrier_workspace,
thread_idx,
tile_work.tile_idx * Epilogue::kAccumulatorFragments,
Epilogue::kAccumulatorFragments);
}
}
/// Executes one GEMM
CUTLASS_DEVICE
void gemm()
{
// Initialize block's iteration range
int tile_idx = 0;
int block_iter_begin = 0;
int block_iters_remaining = 0;
int block_idx = params.block_mapping.get_block_idx();
int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region();
int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms;
int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks;
int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks;
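// Linear block-index space: [0, sk_padding_start_block_idx) are SK blocks,
// [sk_padding_start_block_idx, dp_start_block_idx) pad out the last SK wave,
// [dp_start_block_idx, reduce_start_block_idx) are DP blocks,
// [reduce_start_block_idx, grid_padding_start_block_idx) are reduction blocks (mixed strategy only),
// and any remaining blocks are grid padding that simply exits.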
// Initialize tile work descriptor
TileWorkDesc tile_work;
bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx);
bool sk_block = (block_idx < sk_padding_start_block_idx);
bool reduce_block = (block_idx >= reduce_start_block_idx) &&
(block_idx < grid_padding_start_block_idx) &&
(ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed);
if (dp_block)
{
// This is a DP block
int dp_block_idx = block_idx - dp_start_block_idx;
int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles;
// Blocks in first DP wave get configured number of tiles
tile_idx = first_dp_tile + dp_block_idx;
int tile_allotment = params.block_mapping.dp_first_wave_tiles;
// Blocks in subsequent DP waves get 1 tile
if (dp_block_idx >= params.block_mapping.avail_sms) {
tile_allotment = 1;
tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms;
}
block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allotment;
init_dp_tile_work(tile_work, tile_idx);
// DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1)
if ((tile_idx < params.block_mapping.sk_tiles) ||
(tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) ||
(tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n()))
{
return;
}
}
else if (sk_block)
{
// This is a SK block
int block_iter_end;
params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end);
block_iters_remaining = block_iter_end - block_iter_begin;
tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1);
init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
}
else
{
if (reduce_block)
{
// This is a reduction threadblock
int reduce_block_idx = block_idx - reduce_start_block_idx;
separate_reduction(reduce_block_idx);
}
return;
}
// Iteration-processing loop body
CUTLASS_PRAGMA_NO_UNROLL
while (true)
{
// Perform this block's share of work for this tile
process_tile(
tile_work,
block_idx,
dp_start_block_idx,
block_iter_begin);
block_iters_remaining -= tile_work.k_iters_remaining;
if (block_iters_remaining == 0)
{
break;
}
// Continue to next tile
__syncthreads();
if (block_idx >= dp_start_block_idx)
{
// DP blocks consume their tiles at stride
tile_idx += params.block_mapping.avail_sms;
init_dp_tile_work(tile_work, tile_idx);
}
else
{
// SK blocks consume their tiles in backwards order
tile_idx--;
init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
}
}
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmUniversalStreamk op(params, shared_storage);
op();
}
// Constructor
CUTLASS_DEVICE
GemmUniversalStreamk(
Params const ¶ms,
SharedStorage &shared_storage)
:
params(params),
shared_storage(shared_storage),
thread_idx(threadIdx.x),
warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code is compiled as warp-uniform
lane_idx(threadIdx.x % 32),
epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx)
{}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()()
{
// Generic SK code path
gemm();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/gemm/kernel/gemm_universal_streamk.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/kernel/gemm_universal_streamk.h",
"repo_id": "cutlass",
"token_count": 15874
}
| 32 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cute/tensor.hpp"
namespace cutlass::gemm::kernel {
///////////////////////////////////////////////////////////////////////////////
template <
class ProblemShape_,
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileScheduler_
>
class GemmUniversal<
ProblemShape_,
CollectiveMainloop_,
CollectiveEpilogue_,
TileScheduler_,
cute::enable_if_t<cute::is_base_of_v<KernelMultistage, typename CollectiveMainloop_::DispatchPolicy::Schedule>>>
{
public:
//
// Type Aliases
//
using ProblemShape = ProblemShape_;
static_assert(rank(ProblemShape{}) == 3 or rank(ProblemShape{}) == 4,
"ProblemShape{} should be <M,N,K> or <M,N,K,L>");
// Mainloop derived types
using CollectiveMainloop = CollectiveMainloop_;
using TileShape = typename CollectiveMainloop::TileShape;
using TiledMma = typename CollectiveMainloop::TiledMma;
using ArchTag = typename CollectiveMainloop::ArchTag;
using ElementA = typename CollectiveMainloop::ElementA;
using StrideA = typename CollectiveMainloop::StrideA;
using ElementB = typename CollectiveMainloop::ElementB;
using StrideB = typename CollectiveMainloop::StrideB;
using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy;
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator;
using MainloopArguments = typename CollectiveMainloop::Arguments;
using MainloopParams = typename CollectiveMainloop::Params;
using TileSchedulerTag = TileScheduler_;
using TileScheduler = typename detail::TileSchedulerSelector<
TileScheduler_, ArchTag, TileShape,
cute::Shape<cute::Int<1>, cute::Int<1>, cute::Int<1>>>::Scheduler;
using TileSchedulerArguments = typename TileScheduler::Arguments;
static constexpr bool is_valid_tile_scheduler =
cute::is_void_v<TileScheduler_> or cute::is_same_v<TileScheduler_, PersistentScheduler>;
static_assert(is_valid_tile_scheduler, "SM70 kernel does not support specializing the tile scheduler.");
// Epilogue derived types
using CollectiveEpilogue = CollectiveEpilogue_;
using ElementC = typename CollectiveEpilogue::ElementC;
using StrideC = typename CollectiveEpilogue::StrideC;
using ElementD = typename CollectiveEpilogue::ElementD;
using StrideD = typename CollectiveEpilogue::StrideD;
using EpilogueArguments = typename CollectiveEpilogue::Arguments;
using EpilogueParams = typename CollectiveEpilogue::Params;
static_assert(cute::is_same_v<ElementAccumulator, typename CollectiveEpilogue::ElementAccumulator>,
"Mainloop and epilogue do not agree on accumulator value type.");
// MSVC requires the cast to fix a warning-as-error.
static constexpr int SharedStorageSize = static_cast<int>(cute::max(
sizeof(typename CollectiveMainloop::SharedStorage),
sizeof(typename CollectiveEpilogue::SharedStorage)));
static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(cute::size(TiledMma{}));
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
// Device side arguments
struct Arguments {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopArguments mainloop{};
EpilogueArguments epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerArguments scheduler{};
};
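// Illustrative host-side population (a sketch only; the brace-initializer layouts of the mainloop
// and epilogue arguments depend on the chosen collectives and are assumptions here):
//
//   Arguments args{
//     GemmUniversalMode::kGemm,
//     cute::make_shape(M, N, K, L),
//     {ptr_A, stride_A, ptr_B, stride_B},                 // MainloopArguments (collective-specific)
//     {{alpha, beta}, ptr_C, stride_C, ptr_D, stride_D},  // EpilogueArguments (collective-specific)
//     hw_info,
//     {}};                                                // TileSchedulerArguments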
// Kernel entry point API
struct Params {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopParams mainloop{};
EpilogueParams epilogue{};
};
//
// Methods
//
// Convert to underlying arguments. In this case, a simple copy for the aliased type.
static
Params
to_underlying_arguments(Arguments const& args, void* workspace) {
(void) workspace;
KernelHardwareInfo hw_info{args.hw_info.device_id, args.hw_info.sm_count};
auto problem_shape_MNKL = append<4>(args.problem_shape, Int<1>{});
return {
args.mode,
args.problem_shape,
CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace),
CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace)
};
}
static bool
can_implement(Arguments const& args) {
bool mode_implementable = args.mode == GemmUniversalMode::kGemm or
(args.mode == GemmUniversalMode::kBatched && rank(ProblemShape{}) == 4);
return mode_implementable && TileScheduler::can_implement(args.scheduler);
}
static size_t
get_workspace_size(Arguments const& args) {
size_t workspace_size = 0;
return workspace_size;
}
static
cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
cutlass::Status status = Status::kSuccess;
return status;
}
static dim3
get_grid_shape(Params const& params) {
int batch_count = 1;
if constexpr (cute::rank(ProblemShape{}) == 4) {
batch_count = cute::size<3>(params.problem_shape);
}
return dim3(
cute::size(cute::ceil_div(cute::shape<0>(params.problem_shape), cute::shape<0>(TileShape{}))),
cute::size(cute::ceil_div(cute::shape<1>(params.problem_shape), cute::shape<1>(TileShape{}))),
batch_count
);
}
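// Illustrative example: for a rank-3 problem shape (M, N, K) = (256, 512, 64) with
// TileShape = (128, 128, 32), this returns dim3(2, 4, 1); a rank-4 (M, N, K, L) shape
// sets the z-dimension to the batch count L.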
static dim3
get_block_shape() {
return dim3(MaxThreadsPerBlock, 1, 1);
}
CUTLASS_DEVICE
void
operator()(Params const& params, char* smem_buf) {
using namespace cute;
using X = Underscore;
// Preconditions
CUTE_STATIC_ASSERT(is_static<TileShape>::value);
// Separate out problem shape for convenience
// Optionally append 1s until problem shape is rank-4, in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{});
auto M = get<0>(problem_shape_MNKL);
auto N = get<1>(problem_shape_MNKL);
auto K = get<2>(problem_shape_MNKL);
auto L = get<3>(problem_shape_MNKL);
// Preconditions
static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
// Get the appropriate blocks for this thread block -- potential for thread block locality
int thread_idx = int(threadIdx.x);
auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K)
auto [m_coord, n_coord, l_coord] = static_cast<uint3>(blockIdx);
auto blk_coord_mnkl = make_coord(m_coord, n_coord, _, l_coord); // (m,n,k,l)
// Represent the full tensors
Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); //(m,k,l)
Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); //(n,k,l)
// Get batch slice
Tensor mA_mk = mA_mkl(_,_,l_coord); // (m,k)
Tensor mB_nk = mB_nkl(_,_,l_coord); // (n,k)
// Slice to get the tiles this thread block is responsible for
Tensor gA = local_tile(mA_mk, blk_shape, take<0,3>(blk_coord_mnkl), Step<_1, X,_1>{}); // (BLK_M,BLK_K,k)
Tensor gB = local_tile(mB_nk, blk_shape, take<0,3>(blk_coord_mnkl), Step< X,_1,_1>{}); // (BLK_N,BLK_K,k)
// Compute tile residues for predication
auto m_max_coord = M - size<0>(gA) * get<0>(blk_coord_mnkl); // M - BLK_M * m_coord
auto n_max_coord = N - size<0>(gB) * get<1>(blk_coord_mnkl); // N - BLK_N * n_coord
auto k_residue = K - size<1>(gA) * size<2>(gA); // K - BLK_K * k_coord_max
auto residue_mnk = make_tuple(m_max_coord, n_max_coord, k_residue);
// Allocate the tiled_mma and the accumulators for the (M,N) blk_shape
TiledMma tiled_mma;
Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N)
clear(accumulators);
auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA));
int k_tile_count = size<2>(gA);
// Perform the collective scoped MMA
CollectiveMainloop collective_mma;
collective_mma(
accumulators,
gA,
gB,
accumulators,
k_tile_iter, k_tile_count,
residue_mnk,
thread_idx,
smem_buf
);
// Epilogue and write to gD
CollectiveEpilogue epilogue{params.epilogue};
epilogue(
problem_shape_MNKL,
blk_shape,
blk_coord_mnkl,
accumulators,
tiled_mma,
residue_mnk,
thread_idx,
smem_buf
);
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel
|
cutlass/include/cutlass/gemm/kernel/sm70_gemm.hpp/0
|
{
"file_path": "cutlass/include/cutlass/gemm/kernel/sm70_gemm.hpp",
"repo_id": "cutlass",
"token_count": 4196
}
| 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma1_, ///! Threadblock-scoped triangular matrix multiply-accumulate (A*B or B*A)
typename Mma2_, ///! Threadblock-scoped triangular matrix multiply-accumulate (AT*B or B*AT)
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight)
FillMode FillMode_ ///! Fill Mode for triangular matrix (kLower or kUpper)
>
struct SymmUniversal {
public:
using Mma1 = Mma1_;
using Mma2 = Mma2_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma1::IteratorA::Element;
using ElementB = typename Mma1::IteratorB::Element;
// Mma1 (TRMM - with diagonal: C_tmp = alpha * A * B)
using LayoutA = typename Mma1::IteratorA::Layout;
using LayoutBT = typename Mma1::IteratorB::Layout;
static ComplexTransform const kMma1TransformA = Mma1::kTransformA;
static ComplexTransform const kMma1TransformB = Mma1::kTransformB;
// Mma2 (TRMM - withOUT diagonal: alpha * AT * B)
using LayoutB = typename Mma2::IteratorA::Layout;
using LayoutAT = typename Mma2::IteratorB::Layout;
static ComplexTransform const kMma2TransformA = Mma2::kTransformA;
static ComplexTransform const kMma2TransformB = Mma2::kTransformB;
// Common type definitions for Mma1 and Mma2
using Operator = typename Mma1::Operator;
using OperatorClass = typename Mma1::Operator::OperatorClass;
using ThreadblockShape = typename Mma1::Shape;
using WarpShape = typename Mma1::Operator::Shape;
using InstructionShape = typename Mma1::Policy::Operator::InstructionShape;
using ArchTag = typename Mma1::ArchTag;
static int const kStages = Mma1::kStages;
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
// Output-related type definitions
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static SideMode const kSideModeA = SideMode_;
static FillMode const kFillModeA = FillMode_;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma1::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode = GemmUniversalMode::kGemm;
GemmCoord problem_size{};
int batch_count{1};
typename EpilogueOutputOp::Params epilogue{};
void const * ptr_A{nullptr};
void const * ptr_B{nullptr};
void const * ptr_C{nullptr};
void * ptr_D{nullptr};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
typename LayoutA::Stride::Index lda{0};
typename LayoutB::Stride::Index ldb{0};
typename LayoutC::Stride::Index ldc{0};
typename LayoutC::Stride::Index ldd{0};
//
// Methods
//
Arguments() = default;
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd) {
}
/// Returns arguments for the transposed problem sizes
Arguments transposed_problem_size() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
return args;
}
/// Returns arguments for the transposed matrices
Arguments swapped_matrices() const {
Arguments args(*this);
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
// Mma1 Iterator A and B params
typename Mma1::IteratorA::Params params_A_mma1{};
typename Mma1::IteratorB::Params params_B_mma1{};
// Mma2 Iterator A and B params
typename Mma2::IteratorA::Params params_A_mma2{};
typename Mma2::IteratorB::Params params_B_mma2{};
typename Epilogue::OutputTileIterator::Params params_C{};
typename Epilogue::OutputTileIterator::Params params_D{};
typename EpilogueOutputOp::Params output_op{};
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
int batch_count {0};
int gemm_k_size {0};
void * ptr_A{nullptr};
void * ptr_B{nullptr};
void * ptr_C{nullptr};
void * ptr_D{nullptr};
int64_t batch_stride_A {0};
int64_t batch_stride_B {0};
int64_t batch_stride_C {0};
int64_t batch_stride_D {0};
int *semaphore{nullptr};
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A_mma1(args.lda),
params_B_mma1(args.ldb),
params_A_mma2(args.lda),
params_B_mma2(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(const_cast<void *>(args.ptr_D)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma1::SharedStorage mma1_main_loop;
typename Mma2::SharedStorage mma2_main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
SymmUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes two GEMMs
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_MxK_mma1{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN_mma1{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
cutlass::MatrixCoord tb_offset_MxK_mma2{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN_mma2{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply for Mma1
Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx);
// Construct thread-scoped matrix multiply for Mma2
Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx);
typename Mma1::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
int gemm_k_iterations_mma1 = gemm_k_iterations;
int gemm_k_iterations_mma2 = gemm_k_iterations;
/******************************************************************************************************
* SYMM (Side Mode, Fill Mode) is made of two TRMMs:
First TRMM (Mma1: Side Mode, Fill Mode, Non-Unit Diag): (A * B) or (B * A)
Second TRMM (Mma2: Side Mode, Inverted Fill Mode, Unit Diag): (AT * B) or (B * AT)
* For the first TRMM (Mma1) of SYMM, the following method is used to calculate the k-iterations:
First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transpose of each other
- (Left Side, Lower Fill): calculate bottom of the CTA tile, then find the k-iterations
needed to process all elements till that coordinate.
- (Right Side, Upper Fill): calculate right end of the CTA tile, then find the k-iterations
needed to process all elements till that coordinate.
Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transpose of each other
- (Left Side, Upper Fill): calculate the top of the CTA tile, then find k-iterations
that can be skipped for all elements of this tile.
- (Right Side, Lower Fill): calculate the left start of the CTA tile, then find k-iterations
that can be skipped for all elements of this tile.
* For the second TRMM (Mma2) of SYMM, the k-iterations and threadblock offsets are calculated
the same way as the first TRMM (Mma1) of same side mode but with inverted fill mode.
For example, if the first TRMM is left sided with lower fill, the second TRMM would be
left sided with upper fill.
********************************************************************************************************/
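// Worked example (illustrative numbers): Left side, Lower fill, Mma1::Shape = <128, 128, 32>,
// problem k() = 512 (16 total k-iterations), threadblock_tile_offset.m() = 1:
//   Mma1 (with diagonal):    k_iterations_till_diagonal_mma1 = ((1+1)*128 + 31) / 32 = 8 -> run 8 iterations
//   Mma2 (without diagonal): k_iterations_till_diagonal_mma2 = (1*128) / 32 = 4 -> advance the k offsets
//                            by 4 * 32 elements and run the remaining 16 - 4 = 12 iterations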
if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kLower) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) {
gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 != 0) {
tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK});
tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kUpper) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) {
gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 != 0) {
tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK});
tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kUpper) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 != 0) {
tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK});
tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) {
gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kLower) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 != 0) {
tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK});
tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) {
gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2;
}
}
// Construct iterators to A and B operands for Mma1
typename Mma1::IteratorA iterator_A_mma1(
params.params_A_mma1,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK_mma1);
typename Mma1::IteratorB iterator_B_mma1(
params.params_B_mma1,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN_mma1);
// Construct iterators to A and B operands for Mma2
typename Mma2::IteratorA iterator_A_mma2(
params.params_A_mma2,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK_mma2);
typename Mma2::IteratorB iterator_B_mma2(
params.params_B_mma2,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN_mma2);
// Compute threadblock-scoped matrix multiply-add (A x B) or (B x A)
mma1(
gemm_k_iterations_mma1,
accumulators,
iterator_A_mma1,
iterator_B_mma1,
accumulators);
// Compute threadblock-scoped matrix multiply-add (AT x B) or (B x AT)
mma2(
gemm_k_iterations_mma2,
accumulators,
iterator_A_mma2,
iterator_B_mma2,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
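    // Illustrative walk-through (not part of the original source): with serial split-K and
    // grid_tiled_shape.k() == 3, and assuming Semaphore::wait(v) blocks until the lock equals v,
    // the lock value progresses as
    //
    //   k-partition 0: waits for 0, writes its partial tile, releases lock = 1
    //   k-partition 1: waits for 1, reads the partial result via iterator_C = iterator_D, releases lock = 2
    //   k-partition 2: waits for 2, applies the final output operator, releases lock = 0
    //
    // so the last partition leaves the semaphore reset for a subsequent launch.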
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/gemm/kernel/symm_universal.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/kernel/symm_universal.h",
"repo_id": "cutlass",
"token_count": 9434
}
| 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming
expectations about data layout of the global memory fragments, data types,
and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting sparse
TensorOp instructions.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_sparse_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/gemm/threadblock/mma_sparse_multistage.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Template defining default matrix multiply operators inferred from threadblock tile size,
/// global memory data layout, and target math instruction.
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false
/// Cache operation of operand A
, cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global
>
struct DefaultSparseMmaCore;
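// Illustrative usage sketch (not part of the original source). The concrete shapes and types
// below are assumptions chosen only to show how the specializations that follow are typically
// selected; for FP16 operands the sparse tensor-op instruction shape is typically 16x8x32:
//
//   using Core = cutlass::gemm::threadblock::DefaultSparseMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 64>,        // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 64>,          // warp tile
//       cutlass::gemm::GemmShape<16, 8, 32>,           // instruction shape
//       cutlass::half_t, cutlass::layout::RowMajor,    // A
//       cutlass::half_t, cutlass::layout::ColumnMajor, // B
//       float, cutlass::layout::RowMajor,              // C
//       cutlass::arch::OpClassTensorOp,
//       3>;                                            // stages
//
//   // Core::MmaPolicy and Core::SmemIteratorA/B/E are then consumed by a threadblock-scoped
//   // sparse multistage MMA (see mma_sparse_multistage.h).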
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, int(128 / sizeof(ElementA))>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, int(128 / sizeof(ElementB))>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK / kSparse>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / kSparse / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
  // Crosswise cannot be larger than 1024 bits.
static int const kCrosswiseB =
(Shape::kK > (1024 / sizeof_bits<ElementB>::value))
? (1024 / sizeof_bits<ElementB>::value)
: Shape::kK;
static int const kWarpThreadArrangementContiguousB =
kCrosswiseB / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
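  // Worked example (illustrative, not part of the original source): for ElementB = half_t
  // (16 bits) and Shape::kK = 64, the quantities above evaluate to
  //
  //   kCrosswiseB                       = min(1024 / 16, 64) = 64
  //   elements per 128-bit access       = 128 / 16           = 8
  //   kWarpThreadArrangementContiguousB = 64 / 8             = 8
  //   kWarpThreadArrangementStridedB    = 32 / 8             = 4
  //
  // i.e. the 32 threads of a warp are arranged 8 x 4 (contiguous x strided) when staging B
  // into shared memory.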
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK / kSparse>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, kCrosswiseB>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK / kSparse, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
  // Crosswise cannot be larger than 1024 bits.
static int const kCrosswiseB =
(Shape::kK > (1024 / sizeof_bits<ElementB>::value))
? (1024 / sizeof_bits<ElementB>::value)
: Shape::kK;
static int const kWarpThreadArrangementContiguousB =
kCrosswiseB / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, int(128 / sizeof(ElementA))>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, kCrosswiseB>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK / kSparse>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / kSparse / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK / kSparse>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, int(128 / sizeof(ElementB))>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK / kSparse, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
|
cutlass/include/cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h",
"repo_id": "cutlass",
"token_count": 11445
}
| 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implements several possible threadblock-swizzling functions mapping blockIdx to
GEMM problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/gemm/threadblock/index_remat.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle_streamk.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for GEMMs
template <int N = 1>
struct GemmIdentityThreadblockSwizzle {
CUTLASS_HOST_DEVICE
GemmIdentityThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// *Gemm* problem size: gemm(M, N, K)
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int split_k_slices) {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
split_k_slices);
}
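  // Worked example (illustrative, not part of the original source): for a 1024x512x256 GEMM
  // with a 128x128x32 threadblock tile and split_k_slices == 2, this returns
  // GemmCoord(ceil(1024/128), ceil(512/128), 2) == GemmCoord(8, 4, 2) logical tiles.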
/// Returns the shape of the problem in units of logical tiles
/// *ImplicitGemm* Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
GemmCoord tile_size,
int split_k_slices) {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
return get_tiled_shape(
implicit_gemm_problem_size, tile_size, split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// *ImplicitGemm* Conv3d problem size: conv_operator(NZPQK, NDHWC, KTRSC)
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv3dProblemSize const &problem_size,
GemmCoord tile_size,
int split_k_slices) {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
return get_tiled_shape(
implicit_gemm_problem_size, tile_size, split_k_slices);
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(GemmCoord tiled_shape) {
int tile = 1 << get_log_tile(tiled_shape);
return dim3(tiled_shape.m() * tile, (tiled_shape.n() + tile - 1) / tile, tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
auto n = tiled_shape.n();
    // Thresholds picked so that the swizzle doesn't cause too many no-op CTAs
if (N >= 8 && n >= 6)
return 3;
else if (N >= 4 && n >= 3)
return 2;
else if (N >= 2 && n >= 2)
return 1;
else
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(int log_tile) {
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
int block_idx_z = RematerializeBlockIdxZ();
return GemmCoord{(block_idx_x >> log_tile), //
(block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)),
block_idx_z};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(GemmCoord tiled_shape) {
int const kTile = N;
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
if ((tiled_shape.m() < kTile) || (tiled_shape.n() < kTile))
return GemmCoord{block_idx_x, block_idx_y, RematerializeBlockIdxZ()};
return GemmCoord{
(block_idx_x / kTile),
(block_idx_y * kTile) + (block_idx_x % kTile),
RematerializeBlockIdxZ()
};
}
};
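// Worked example (illustrative, not part of the original source): for N == 8 and a tiled
// shape of (16, 8, 1) tiles,
//
//   get_log_tile(...)   returns 3 (since N >= 8 and tiled_shape.n() >= 6)
//   get_grid_shape(...) returns dim3(16 * 8, (8 + 7) / 8, 1) == dim3(128, 1, 1)
//   get_tile_offset(3)  maps blockIdx == (10, 0, 0) to tile
//       m = 10 >> 3             = 1
//       n = (0 << 3) + (10 & 7) = 2
//
// so runs of 2^log_tile consecutive blockIdx.x values cover neighboring tiles along N at the
// same M, which is intended to improve data reuse among concurrently resident CTAs.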
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for GEMMs
struct GemmHorizontalThreadblockSwizzle {
CUTLASS_HOST_DEVICE
GemmHorizontalThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int split_k_slices) {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
split_k_slices);
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(GemmCoord tiled_shape) {
return dim3(tiled_shape.n(), tiled_shape.m(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(GemmCoord tiled_shape) {
return GemmCoord{
RematerializeBlockIdxY(),
RematerializeBlockIdxX(),
RematerializeBlockIdxZ()
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for batched GEMMs
struct GemmBatchedIdentityThreadblockSwizzle {
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int batch_count) {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
batch_count % (1 << 16));
}
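  // Note (illustrative, not part of the original source): the modulo by (1 << 16) keeps the
  // k() extent within the range representable by the CUDA grid z-dimension; batch counts
  // beyond that are assumed to be handled elsewhere (e.g. by looping inside the kernel) and
  // are not shown in this file.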
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(GemmCoord tiled_shape) {
return dim3(tiled_shape.m(), tiled_shape.n(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(GemmCoord tiled_shape) {
return GemmCoord{
RematerializeBlockIdxX(),
RematerializeBlockIdxY(),
RematerializeBlockIdxZ()
};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(int log_tile) {
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
int block_idx_z = RematerializeBlockIdxZ();
return GemmCoord{(block_idx_x >> log_tile), //
(block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)),
block_idx_z};
}
/// Gets the batch index
CUTLASS_DEVICE
static int get_batch_idx() {
return RematerializeBlockIdxZ();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for split-K GEMMs
template <int N = 1>
struct GemmSplitKIdentityThreadblockSwizzle {
int const kTile = N;
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int partitions) {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
partitions);
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
auto n = tiled_shape.n();
    // Thresholds picked so that the swizzle doesn't cause too many no-op CTAs
if (N >= 8 && n >= 6)
return 3;
else if (N >= 4 && n >= 3)
return 2;
else if (N >= 2 && n >= 2)
return 1;
else
return 0;
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(GemmCoord tiled_shape) {
int tile = 1 << get_log_tile(tiled_shape);
return dim3(tiled_shape.m() * tile, (tiled_shape.n() + tile - 1) / tile, tiled_shape.k());
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(int log_tile) {
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
int block_idx_z = RematerializeBlockIdxZ();
return GemmCoord{(block_idx_x >> log_tile), //
(block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)),
block_idx_z};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(GemmCoord tiled_shape) {
int const kTile = N;
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
if ((tiled_shape.m() < kTile) || (tiled_shape.n() < kTile))
return GemmCoord{block_idx_x, block_idx_y, RematerializeBlockIdxZ()};
return GemmCoord{
(block_idx_x / kTile),
(block_idx_y * kTile) + (block_idx_x % kTile),
RematerializeBlockIdxZ()
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for split-K GEMMs
struct GemmSplitKHorizontalThreadblockSwizzle {
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int partitions) {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
partitions);
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(GemmCoord tiled_shape) {
return dim3(tiled_shape.n(), tiled_shape.m(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(int log_tile) {
return GemmCoord{
RematerializeBlockIdxY(),
RematerializeBlockIdxX(),
RematerializeBlockIdxZ()
};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(GemmCoord tiled_shape) {
return GemmCoord{
RematerializeBlockIdxY(),
RematerializeBlockIdxX(),
RematerializeBlockIdxZ()
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for batched GEMVs
struct GemvBatchedStridedThreadblockDefaultSwizzle {
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static BatchedGemmCoord get_tiled_shape(
BatchedGemmCoord problem_size,
BatchedGemmCoord tile_size) {
return BatchedGemmCoord(
1, // M is always 1
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
(problem_size.k() + tile_size.k() - 1) / tile_size.k(),
(problem_size.batch() + tile_size.batch() - 1) / tile_size.batch());
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(BatchedGemmCoord tiled_shape) {
return dim3(tiled_shape.n(), tiled_shape.batch(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static BatchedGemmCoord get_tile_offset(int log_tile) {
return BatchedGemmCoord{
0, // M is always 1
RematerializeBlockIdxX(),
RematerializeBlockIdxZ(),
RematerializeBlockIdxY(),
};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static BatchedGemmCoord get_tile_offset() {
return BatchedGemmCoord{
0, // M is always 1
RematerializeBlockIdxX(),
RematerializeBlockIdxZ(),
RematerializeBlockIdxY(),
};
}
/// Gets the batch tile index
CUTLASS_DEVICE
static int get_batch_tile_idx() {
return RematerializeBlockIdxY();
}
/// Gets the absolute batch index
CUTLASS_DEVICE
static int get_batch_idx() {
return RematerializeBlockDimY()*RematerializeBlockIdxY() + RematerializeThreadIdxY();
}
};
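// Worked example (illustrative, not part of the original source): get_batch_idx() composes the
// absolute batch index as blockDim.y * blockIdx.y + threadIdx.y. Assuming the launch sets
// blockDim.y to tile_size.batch() == 4, a thread with blockIdx.y == 3 and threadIdx.y == 1
// therefore works on batch 4 * 3 + 1 == 13.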
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
|
cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle.h",
"repo_id": "cutlass",
"token_count": 5536
}
| 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/gemm/warp/tile_iterator_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Underlying real-valued warp-level matrix multiply
typename Operator_,
/// Transformation applied to A operand (typically folded into math instruction)
ComplexTransform TransformA = ComplexTransform::kNone,
/// Transformation applied to B operand (typically folded into math instruction)
ComplexTransform TransformB = ComplexTransform::kNone
>
class MmaPlanarComplex {
public:
/// Underlying real-valued warp-level matrix multiply
using Operator = Operator_;
  /// Shape of warp-level matrix multiply
using Shape = typename Operator::Shape;
/// Transformation applied to A operand (typically folded into math instruction)
static ComplexTransform const kTransformA = TransformA;
/// Transformation applied to B operand (typically folded into math instruction)
static ComplexTransform const kTransformB = TransformB;
/// Fragment of elements
using FragmentA = ArrayPlanarComplex<typename Operator::ElementA, Operator::FragmentA::kElements>;
/// Iterator into planar complex
using IteratorA = TileIteratorPlanarComplex<typename Operator::IteratorA>;
/// Layout in memory of the A operand
using LayoutA = typename Operator::LayoutA;
using FragmentB = ArrayPlanarComplex<typename Operator::ElementB, Operator::FragmentB::kElements>;
/// Iterator into planar complex
using IteratorB = TileIteratorPlanarComplex<typename Operator::IteratorB>;
/// Layout in memory of the B operand
using LayoutB = typename Operator::LayoutB;
/// Tile iterator for accumulator
using IteratorC = TileIteratorPlanarComplex<typename Operator::IteratorC>;
/// Accumulator fragment
using FragmentC = ArrayPlanarComplex<typename Operator::ElementC, Operator::FragmentC::kElements>;
/// Layout of accumulator fragment in memory
using LayoutC = typename Operator::LayoutC;
private:
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Operator::Shape::kM / Operator::Policy::Operator::Shape::kM,
Operator::Shape::kN / Operator::Policy::Operator::Shape::kN
>;
public:
/// Ctor
CUTLASS_DEVICE
MmaPlanarComplex() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A_in,
FragmentB const &B_in,
FragmentC const &C) const {
D.real = C.real;
D.imag = C.imag;
//
// Transform fragments based on conjugate operations.
//
negate<typename FragmentA::ArrayReal> neg_A;
FragmentA frag_A;
frag_A.real = A_in.real;
    // Copy the imaginary part of A, negating it if the A operand is conjugated.
    if (kTransformA == ComplexTransform::kConjugate) {
      frag_A.imag = neg_A(A_in.imag);
    }
    else {
      frag_A.imag = A_in.imag;
    }
FragmentB frag_B;
frag_B.real = B_in.real;
    // Copy the imaginary part of B, negating it if the B operand is conjugated.
    if (kTransformB == ComplexTransform::kConjugate) {
      negate<typename FragmentB::ArrayReal> neg_B;
      frag_B.imag = neg_B(B_in.imag);
    }
    else {
      frag_B.imag = B_in.imag;
    }
//
// Accumulated real-valued matrix multiplies
//
Operator real_mma;
// D.i += A.i * B.r
real_mma(D.imag, frag_A.imag, frag_B.real, D.imag);
// D.r += A.r * B.r
real_mma(D.real, frag_A.real, frag_B.real, D.real);
// D.i += A.r * B.i
real_mma(D.imag, frag_A.real, frag_B.imag, D.imag);
// D.r += -A.i * B.i
frag_A.imag = neg_A(frag_A.imag);
real_mma(D.real, frag_A.imag, frag_B.imag, D.real);
}
};
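// Illustrative note (not part of the original source): the four real-valued MMAs above realize
// the complex product
//
//   (A.r + i A.i) * (B.r + i B.i) = (A.r B.r - A.i B.i) + i (A.r B.i + A.i B.r)
//
// with the final term obtained by negating frag_A.imag in place before the last real_mma call.
// A conjugated operand, e.g. MmaPlanarComplex<RealMma, ComplexTransform::kConjugate>, simply
// flips the sign of the corresponding imaginary fragment before the same four MMAs are issued
// (RealMma here stands for any real-valued warp-level operator and is only a placeholder name).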
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/gemm/warp/mma_planar_complex.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/warp/mma_planar_complex.h",
"repo_id": "cutlass",
"token_count": 1855
}
| 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/wmma_array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
///< Structure to compute the matrix product targeting Tensor Cores via the WMMA API.
template <
///< Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
///< Data type of A elements
typename ElementA_,
///< Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
///< Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
///< Element type of C matrix
typename ElementC_,
///< Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
///< Policy describing warp-level Wmma operation (concept: MmaTensorOpPolicy)
typename Policy_,
///< Number of partitions along K dimension
int PartitionsK_ = 1,
///< Used for partial specialization
typename Enable = bool
>
class MmaTensorOpWmma {
public:
///< Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
///< Data type of multiplicand A
using ElementA = ElementA_;
///< Layout of multiplicand A
using LayoutA = LayoutA_;
///< Data type of multiplicand B
using ElementB = ElementB_;
///< Layout of multiplicand B
using LayoutB = LayoutB_;
///< Data type of accumulator matrix C
using ElementC = ElementC_;
///< Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
using Policy = Policy_;
/// Underlying instruction shape
using InstructionShape = typename Policy::Operator::Shape;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Underlying architecture tag
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassWmmaTensorOp;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpWmmaMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA,
Policy::OpDelta::kRow, kThreadCount, Policy>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpWmmaMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB,
Policy::OpDelta::kRow, kThreadCount, Policy>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpWmmaAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC,
typename Policy::OpDelta, Policy>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
private:
static_assert(
!(Shape::kM % Policy::Operator::Shape::kM) &&
!(Shape::kN % Policy::Operator::Shape::kN),
"Shape of warp-level Wmma must be divisible by operator shape (wmma native size)");
/// Number of wmma operations performed
using WmmaIterations = MatrixShape<
Shape::kM / Policy::Operator::Shape::kM,
Shape::kN / Policy::Operator::Shape::kN
>;
public:
/// Underlying matrix multiply operator (concept: cutlass::arch::Wmma)
typename Policy::Operator wmma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaTensorOpWmma() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < WmmaIterations::kColumn; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < WmmaIterations::kRow; ++m) {
        // Accumulate one WMMA operation: D = A * B + C
wmma(D[m * WmmaIterations::kColumn + n], A[m], B[n], C[m * WmmaIterations::kColumn + n]);
}
}
}
};
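// Illustrative usage sketch (not part of the original source). The exact template arguments of
// arch::Wmma and MmaTensorOpPolicy are assumptions here, shown only to indicate how this class
// is typically composed for half-precision inputs with the 16x16x16 native WMMA shape:
//
//   using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
//       cutlass::arch::Wmma<cutlass::gemm::GemmShape<16, 16, 16>,
//                           cutlass::half_t, cutlass::layout::RowMajor,
//                           cutlass::half_t, cutlass::layout::ColumnMajor,
//                           float, cutlass::layout::RowMajor,
//                           cutlass::arch::OpMultiplyAdd>,
//       cutlass::MatrixShape<1, 1>>;
//
//   using WarpMma = cutlass::gemm::warp::MmaTensorOpWmma<
//       cutlass::gemm::GemmShape<64, 64, 16>,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       Policy>;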
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
|
cutlass/include/cutlass/gemm/warp/mma_tensor_op_wmma.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/warp/mma_tensor_op_wmma.h",
"repo_id": "cutlass",
"token_count": 2225
}
| 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes for common 4-D and 5-D
tensor formats.
Layout functions map logical coordinates to linear memory. They often require additional
data to describe strides between elements.
Layout functions must implement all members in the public interface of IdentityTensorLayout<>
defined in cutlass/tensor_ref.h.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include "assert.h"
#endif
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/coord.h"
#include "cutlass/tensor_coord.h"
namespace cutlass {
namespace layout {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Defines data layouts of various tensor formats usable by TensorRef and other classes.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag used for 3-D NWC tensors for 1D conv, only used in 3.x API
class TensorNWC {};
/// Tag used for n-D KCSRT tensors for nD conv, only used in 3.x API for wgrad output layouts
class TensorKCS {};
class TensorKCSR {};
class TensorKCSRT {};
/// Mapping function for 4-D NHWC tensors.
class TensorNHWC {
public:
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate (n, h, w, c)
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [stride_w, stride_h, stride_n]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNHWC(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNHWC(
typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates
typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_w, stride_h, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNHWC(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
/// Helper returns a layout to a tightly packed NHWC tensor.
CUTLASS_HOST_DEVICE
static TensorNHWC packed(TensorCoord const &extent) {
return TensorNHWC(
make_Coord(
extent.c(),
extent.w() * extent.c(),
extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate (n, h, w, c) in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.c() +
LongIndex(stride_[0] * coord.w()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return coord.contiguous() + LongIndex(coord.strided() * stride_[2]);
}
/// Returns the logical coordinate (n, h, w, c) from a given offset in linear memory.
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex index) const {
int n = 0, h = 0, w = 0, c = 0;
#if defined(__CUDA_ARCH__)
int tmp = 0;
c = int(index % static_cast<int>(stride_[0]));
unsigned int hw_mul, hw_shr, w_mul, w_shr, c_mul, c_shr;
find_divisor(hw_mul, hw_shr, stride_[2]);
find_divisor(w_mul, w_shr, stride_[1]);
find_divisor(c_mul, c_shr, stride_[0]);
fast_divmod(n, tmp, index, int(stride_[2]), hw_mul, hw_shr);
fast_divmod(h, w, tmp, int(stride_[1]), w_mul, w_shr);
fast_divmod(w, tmp, w, int(stride_[0]), c_mul, c_shr);
#else
n = int(index / stride_[2]);
LongIndex residual = index % stride_[2];
h = int(residual / stride_[1]);
residual = (residual % stride_[1]);
w = int(residual / stride_[0]);
c = int(residual % stride_[0]);
#endif
return TensorCoord(n, h, w, c);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
    // The capacity calculation is not meaningful if the extent exceeds the stride
    // in any dimension, so it cannot be relied upon in such cases.
    // These checks could be moved to debug-only builds.
if ((extent.c() > stride_[0])
|| (extent.w() * stride_[0] > stride_[1])
|| (extent.h() * stride_[1] > stride_[2])) {
assert(0);
}
return extent.n() * stride_[2];
}
};
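/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original header): shows how a packed NHWC layout maps a
// logical (n, h, w, c) coordinate to a linear offset. The extent and coordinate values below are
// hypothetical and chosen only for demonstration.
CUTLASS_HOST_DEVICE
TensorNHWC::LongIndex example_packed_nhwc_offset() {
  // Packed strides for extent (N=2, H=8, W=8, C=16) are [16, 128, 1024]
  TensorNHWC layout = TensorNHWC::packed(Tensor4DCoord(2, 8, 8, 16));
  // offset = c + 16 * w + 128 * h + 1024 * n = 7 + 64 + 384 + 1024 = 1479
  return layout(Tensor4DCoord(1, 3, 4, 7));
}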
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D NCHW tensors.
class TensorNCHW {
public:
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [w, hw, chw]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCHW(Stride const &stride = Stride(0)): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorNCHW packed(TensorCoord const &extent) {
return TensorNCHW(
make_Coord(
extent.w(),
extent.w() * extent.h(),
extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.w() +
LongIndex(stride_[0] * coord.h()) +
LongIndex(stride_[1] * coord.c()) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent.n() * stride_[2];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D NC/xHWx tensors.
template <int Interleave>
class TensorNCxHWx {
public:
/// Interleaving quantity
static int const kInterleave = Interleave;
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [Interleave x w, Interleave x wh, hwc]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCxHWx(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCxHWx(
typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates
typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_w, stride_h, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNCxHWx(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorNCxHWx packed(TensorCoord const &extent) {
return TensorNCxHWx(
make_Coord(
kInterleave * extent.w(),
kInterleave * extent.w() * extent.h(),
extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
Index c_minor = (coord.c() % kInterleave);
Index c_major = (coord.c() / kInterleave);
return c_minor +
LongIndex(kInterleave * coord.w()) +
LongIndex(stride_[0] * coord.h()) +
LongIndex(stride_[1] * c_major) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent.n() * stride_[2];
}
};
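/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original header): shows how the interleaved NC/xHWx layout
// splits the channel index into a minor index (within the interleave group) and a major index.
// The interleave factor, extent, and coordinate below are hypothetical.
CUTLASS_HOST_DEVICE
TensorNCxHWx<32>::LongIndex example_packed_nc32hw32_offset() {
  // Packed strides for extent (N=1, H=4, W=4, C=64) are [128, 512, 1024]
  TensorNCxHWx<32> layout = TensorNCxHWx<32>::packed(Tensor4DCoord(1, 4, 4, 64));
  // c = 37 splits into c_minor = 5 and c_major = 1, so
  // offset = 5 + 32 * 2 + 128 * 3 + 512 * 1 + 1024 * 0 = 965
  return layout(Tensor4DCoord(0, 3, 2, 37));
}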
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D CxRSKx tensors.
template <int Interleave>
class TensorCxRSKx {
public:
/// Interleaving quantity
static int const kInterleave = Interleave;
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [Interleave x n, Interleave x nw, Interleave x nwh]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorCxRSKx(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorCxRSKx(
typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates
typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_w, stride_h, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorCxRSKx(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorCxRSKx packed(TensorCoord const &extent) {
return TensorCxRSKx(
make_Coord(
kInterleave * extent.n(),
kInterleave * extent.n() * extent.w(),
kInterleave * extent.n() * extent.w() * extent.h()
)
);
}
/// Returns the offset of a coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
Index c_minor = (coord.c() % kInterleave);
Index c_major = (coord.c() / kInterleave);
return c_minor +
LongIndex(kInterleave * coord.n()) +
LongIndex(stride_[0] * coord.w()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * c_major);
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord const &coord) const {
return (coord.contiguous() % kInterleave) +
LongIndex((coord.contiguous() / kInterleave) * stride_[2]) +
LongIndex(coord.strided() * kInterleave);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return (extent.c() / kInterleave * stride_[2]);
}
};
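/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original header): in the CxRSKx filter layout the
// interleaved channel-minor index varies fastest, followed by the N (filter) index, as the offset
// formula above shows. The interleave factor, extent, and coordinate below are hypothetical.
CUTLASS_HOST_DEVICE
TensorCxRSKx<4>::LongIndex example_packed_cxrskx_offset() {
  // Packed strides for extent (N=2, H=3, W=3, C=8) are [8, 24, 72]
  TensorCxRSKx<4> layout = TensorCxRSKx<4>::packed(Tensor4DCoord(2, 3, 3, 8));
  // c = 5 splits into c_minor = 1 and c_major = 1, so
  // offset = 1 + 4 * 1 + 8 * 1 + 24 * 2 + 72 * 1 = 133
  return layout(Tensor4DCoord(1, 2, 1, 5));
}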
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 5-D NDHWC tensors.
class TensorNDHWC {
public:
/// Logical rank of tensor
static int const kRank = 5;
/// Rank of stride vector
static int const kStrideRank = 4;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate (n, d, h, w, c)
using TensorCoord = Tensor5DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [c, wc, hwc, dhwc]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNDHWC(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNDHWC(
typename Stride::Index c,
typename Stride::Index wc,
typename Stride::Index hwc,
typename Stride::Index dhwc):
stride_(make_Coord(c, wc, hwc, dhwc)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNDHWC(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]),
static_cast<typename Stride::Index>(stride[3]))
) { }
  /// Helper returns a layout to a tightly packed NDHWC tensor.
CUTLASS_HOST_DEVICE
static TensorNDHWC packed(TensorCoord const &extent) {
return TensorNDHWC(
make_Coord(
extent.c(),
extent.w() * extent.c(),
extent.h() * extent.w() * extent.c(),
extent.d() * extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate (n, d, h, w, c) in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.c() +
LongIndex(stride_[0] * coord.w()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * coord.d()) +
LongIndex(stride_[3] * coord.n());
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return coord.contiguous() + LongIndex(coord.strided() * stride_[3]);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
    // The capacity calculation is not meaningful if the extent exceeds the stride
    // in any dimension, so it cannot be relied upon in such cases.
    // These checks could be moved to debug-only builds.
if ((extent.c() > stride_[0])
|| (extent.w() * stride_[0] > stride_[1])
|| (extent.h() * stride_[1] > stride_[2])
|| (extent.d() * stride_[2] > stride_[3])) {
assert(0);
}
return extent.n() * stride_[3];
}
};
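/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original header): offset computation for a packed NDHWC
// tensor, analogous to the NHWC case with an additional depth dimension. Values are hypothetical.
CUTLASS_HOST_DEVICE
TensorNDHWC::LongIndex example_packed_ndhwc_offset() {
  // Packed strides for extent (N=1, D=2, H=4, W=4, C=8) are [8, 32, 128, 256]
  TensorNDHWC layout = TensorNDHWC::packed(Tensor5DCoord(1, 2, 4, 4, 8));
  // offset = c + 8 * w + 32 * h + 128 * d + 256 * n = 5 + 24 + 64 + 128 + 0 = 221
  return layout(Tensor5DCoord(0, 1, 2, 3, 5));
}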
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag used for linearized tensors with shape (NW, C) for 1D conv, only used in 3.x API
class TensorLinearizedNWC {};
/// Tag used for linearized tensors with shape (NHW, C) for 2D conv, only used in 3.x API
class TensorLinearizedNHWC : public TensorNHWC {};
/// Tag used for linearized tensors with shape (NDHW, C) for 3D conv, only used in 3.x API
class TensorLinearizedNDHWC : public TensorNDHWC {};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
|
cutlass/include/cutlass/layout/tensor.h/0
|
{
"file_path": "cutlass/include/cutlass/layout/tensor.h",
"repo_id": "cutlass",
"token_count": 6722
}
| 39 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a densely packed quaternion object intended for storing data in registers and
executing quaternion operations within a CUDA or host thread.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/array.h"
#include "cutlass/real.h"
#include "cutlass/coord.h"
#include "cutlass/matrix.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/vector.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Quaternion: xi + yj + zk + w
template <
typename Element_ = float ///< element type
>
class Quaternion : public Array<Element_, 4> {
public:
/// Logical rank of tensor index space
static int const kRank = 1;
/// Number of elements
static int const kExtent = 4;
/// Base class is a four-element array
using Base = Array<Element_, kExtent>;
/// Element type
using Element = typename Base::Element;
/// Reference type to an element
using Reference = typename Base::reference;
/// Index type
using Index = int;
/// Quaternion storage - imaginary part
static int const kX = 0;
/// Quaternion storage - imaginary part
static int const kY = 1;
/// Quaternion storage - imaginary part
static int const kZ = 2;
/// Quaternion storage - real part
static int const kW = 3;
public:
//
// Methods
//
/// Constructs a quaternion q = 0
CUTLASS_HOST_DEVICE
Quaternion() {
Base::at(kX) = Element();
Base::at(kY) = Element();
Base::at(kZ) = Element();
Base::at(kW) = Element();
}
/// Constructs a quaternion q = w + 0*i + 0*j + 0*k
CUTLASS_HOST_DEVICE
Quaternion(
Element w_
) {
Base::at(kX) = Element();
Base::at(kY) = Element();
Base::at(kZ) = Element();
Base::at(kW) = w_;
}
/// Constructs a quaternion q = w + x*i + y*j + z*k
CUTLASS_HOST_DEVICE
Quaternion(
Element x_,
Element y_,
Element z_,
Element w_
) {
Base::at(kX) = x_;
Base::at(kY) = y_;
Base::at(kZ) = z_;
Base::at(kW) = w_;
}
/// Constructs a quaternion from a vector representing the imaginary part and a real number
CUTLASS_HOST_DEVICE
Quaternion(
Matrix3x1<Element> const &imag_,
Element w_ = Element()
) {
Base::at(kX) = imag_[0];
Base::at(kY) = imag_[1];
Base::at(kZ) = imag_[2];
Base::at(kW) = w_;
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(Index idx) const {
return Base::at(idx);
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(Index idx) {
return Base::at(idx);
}
/// Accesses the x element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Element x() const {
return Base::at(kX);
}
/// Accesses the x element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Reference x() {
return Base::at(kX);
}
/// Accesses the y element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Element y() const {
return Base::at(kY);
}
/// Accesses the y element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Reference y() {
return Base::at(kY);
}
/// Accesses the z element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Element z() const {
return Base::at(kZ);
}
/// Accesses the z element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Reference z() {
return Base::at(kZ);
}
/// Accesses the real part of the quaternion
CUTLASS_HOST_DEVICE
Element w() const {
return Base::at(kW);
}
/// Accesses the real part of the quaternion
CUTLASS_HOST_DEVICE
Reference w() {
return Base::at(kW);
}
/// Returns the pure imaginary part of the quaternion as a 3-vector
CUTLASS_HOST_DEVICE
Matrix3x1<Element> pure() const {
return Matrix3x1<Element>(x(), y(), z());
}
/// Returns a quaternion representation of a spatial rotation given a unit-length axis and
/// a rotation in radians.
CUTLASS_HOST_DEVICE
static Quaternion<Element> rotation(
Matrix3x1<Element> const &axis_unit, ///< axis of rotation (assumed to be unit length)
Element theta) { ///< angular rotation in radians
Element s = fast_sin(theta / Element(2));
return Quaternion(
s * axis_unit[0],
s * axis_unit[1],
s * axis_unit[2],
fast_cos(theta / Element(2))
);
}
/// Returns a quaternion representation of a spatial rotation represented as a
/// unit-length rotation axis (r_x, r_y, r_z) and an angular rotation in radians
CUTLASS_HOST_DEVICE
static Quaternion<Element> rotation(
Element r_x,
Element r_y,
Element r_z,
Element theta) { ///< angular rotation in radians
return rotation({r_x, r_y, r_z}, theta);
}
/// Geometric rotation of a 3-element vector
CUTLASS_HOST_DEVICE
Matrix3x1<Element> rotate(Matrix3x1<Element> const &rhs) const {
return (*this * Quaternion<Element>(rhs, 0) * reciprocal(*this)).pure();
}
/// Inverse rotation operation
CUTLASS_HOST_DEVICE
Matrix3x1<Element> rotate_inv(Matrix3x1<Element> const &rhs) const {
return (reciprocal(*this) * Quaternion<Element>(rhs, 0) * *this).pure();
}
/// Rotates a 3-vector assuming this is a unit quaternion (a spinor)
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor(Matrix3x1<Element> const &rhs) const {
return (*this * Quaternion<Element>(rhs, 0) * conj(*this)).pure();
}
/// Inverse rotation of 3-vector assuming this is a unit quaternion (a spinor)
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor_inv(Matrix3x1<Element> const &rhs) const {
return (conj(*this) * Quaternion<Element>(rhs, 0) * *this).pure();
}
/// In-place addition
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator+=(Quaternion<Element> const &rhs) {
*this = (*this + rhs);
return *this;
}
/// In-place subtraction
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator-=(Quaternion<Element> const &rhs) {
*this = (*this - rhs);
return *this;
}
/// In-place multiplication
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator*=(Quaternion<Element> const &rhs) {
*this = (*this * rhs);
return *this;
}
/// Scalar multiplication
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator*=(Element s) {
*this = (*this * s);
return *this;
}
/// In-place Division
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator/=(Quaternion<Element> const &rhs) {
*this = (*this / rhs);
return *this;
}
/// In-place Division
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator/=(Element s) {
*this = (*this / s);
return *this;
}
/// Computes a 3x3 rotation matrix (row-major representation)
CUTLASS_HOST_DEVICE
Matrix3x3<Element> as_rotation_matrix_3x3() const {
Matrix3x3<Element> m(
w() * w() + x() * x() - y() * y() - z() * z(),
2 * x() * y() - 2 * w() * z(),
2 * x() * z() + 2 * w() * y(),
2 * x() * y() + 2 * w() * z(),
w() * w() - x() * x() + y() * y() - z() * z(),
2 * y() * z() - 2 * w() * x(),
2 * x() * z() - 2 * w() * y(),
2 * y() * z() + 2 * w() * x(),
w() * w() - x() * x() - y() * y() + z() * z()
);
return m;
}
/// Computes a 4x4 rotation matrix (row-major representation)
CUTLASS_HOST_DEVICE
Matrix4x4<Element> as_rotation_matrix_4x4() const {
Matrix4x4<Element> m = Matrix4x4<Element>::identity();
m.set_slice_3x3(as_rotation_matrix_3x3());
return m;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructs a quaternion that is non-zero only in its real element.
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_Quaternion(
Element w) { ///< real part
return Quaternion<Element>(w);
}
/// Constructs a quaternion from a vector and real
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_Quaternion(
  Matrix3x1<Element> const &imag,        ///< imaginary part as a vector
Element w) { ///< real part
return Quaternion<Element>(imag, w);
}
/// Constructs a quaternion from a unit-length rotation axis and a rotation
/// angle in radians
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_QuaternionRotation(
Matrix3x1<Element> const &axis_unit, ///< rotation axis (unit-length)
Element w) { ///< rotation angle in radians
return Quaternion<Element>::rotation(axis_unit, w);
}
/// Constructs a quaternion q = xi + yj + zk + w
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_Quaternion(Element x, Element y, Element z, Element w) {
return Quaternion<Element>(x, y, z, w);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the real part of the quaternion number
template <typename Element>
CUTLASS_HOST_DEVICE
Element const &real(Quaternion<Element> const &q) {
return q.w();
}
/// Returns the real part of the quaternion number
template <typename Element>
CUTLASS_HOST_DEVICE
Element &real(Quaternion<Element> &q) {
return q.w();
}
/// Returns the magnitude of the quaternion number
template <typename Element>
CUTLASS_HOST_DEVICE
Element abs(Quaternion<Element> const &q) {
return fast_sqrt(norm(q));
}
/// Quaternion conjugate
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> conj(Quaternion<Element> const &q) {
return make_Quaternion(
-q.x(),
-q.y(),
-q.z(),
q.w()
);
}
/// Computes the squared magnitude of the quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Element norm(Quaternion<Element> const &q) {
return q.x() * q.x() + q.y() * q.y() + q.z() * q.z() + q.w() * q.w();
}
/// Quaternion reciprocal
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> reciprocal(Quaternion<Element> const &q) {
Element nsq = norm(q);
return make_Quaternion(
-q.x() / nsq,
-q.y() / nsq,
-q.z() / nsq,
q.w() / nsq
);
}
/// Returns a unit-length quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> unit(Quaternion<Element> const &q) {
Element rcp_mag = Element(1) / abs(q);
return make_Quaternion(
q.x() * rcp_mag,
q.y() * rcp_mag,
q.z() * rcp_mag,
q.w() * rcp_mag
);
}
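// Illustrative sketch (not part of the original file): normalizing a quaternion. For
// q = 3i + 4, norm(q) = 25, abs(q) = 5, and unit(q) = 0.6i + 0.8. The values are hypothetical.
CUTLASS_HOST_DEVICE
Quaternion<float> example_unit_quaternion() {
  Quaternion<float> q = make_Quaternion(3.0f, 0.0f, 0.0f, 4.0f);
  return unit(q);  // (0.6, 0, 0, 0.8)
}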
/// Quaternion exponential
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> exp(Quaternion<Element> const &q) {
Element exp_ = fast_exp(q.w());
Element imag_norm = fast_sqrt(q.x() * q.x() + q.y() * q.y() + q.z() * q.z());
Element sin_norm = fast_sin(imag_norm);
return make_Quaternion(
exp_ * q.x() * sin_norm / imag_norm,
exp_ * q.y() * sin_norm / imag_norm,
exp_ * q.z() * sin_norm / imag_norm,
exp_ * fast_cos(imag_norm)
);
}
/// Quaternion natural logarithm
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> log(Quaternion<Element> const &q) {
Element v = fast_sqrt(q.x() * q.x() + q.y() * q.y() + q.z() * q.z());
Element s = fast_acos(q.w() / abs(q)) / v;
return make_Quaternion(
q.x() * s,
q.y() * s,
q.z() * s,
fast_log(q.w())
);
}
/// Gets the rotation angle from a unit-length quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Element get_rotation_angle(Quaternion<Element> const &q_unit) {
return fast_acos(q_unit.w()) * Element(2);
}
/// Gets the rotation axis from a unit-length quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Matrix3x1<Element> get_rotation_axis(Quaternion<Element> const &q_unit) {
return q_unit.pure().unit();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Equality operator
template <typename Element>
CUTLASS_HOST_DEVICE
bool operator==(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return lhs.x() == rhs.x() &&
lhs.y() == rhs.y() &&
lhs.z() == rhs.z() &&
lhs.w() == rhs.w();
}
/// Inequality operator
template <typename Element>
CUTLASS_HOST_DEVICE
bool operator!=(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return !(lhs == rhs);
}
/// Quaternion scalar multiplication
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator*(Quaternion<Element> q, Element s) {
return make_Quaternion(
q.x() * s,
q.y() * s,
q.z() * s,
q.w() * s
);
}
/// Quaternion scalar multiplication
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator*(Element s, Quaternion<Element> const &q) {
return make_Quaternion(
s * q.x(),
s * q.y(),
s * q.z(),
s * q.w()
);
}
/// Quaternion scalar division
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator/(Quaternion<Element> const &q, Element s) {
return make_Quaternion(
q.x() / s,
q.y() / s,
q.z() / s,
q.w() / s
);
}
/// Quaternion unary negation
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator-(Quaternion<Element> const &q) {
return make_Quaternion(
-q.x(),
-q.y(),
-q.z(),
-q.w()
);
}
/// Quaternion addition
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator+(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return make_Quaternion(
lhs.x() + rhs.x(),
lhs.y() + rhs.y(),
lhs.z() + rhs.z(),
lhs.w() + rhs.w()
);
}
/// Quaternion subtraction
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator-(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return make_Quaternion(
lhs.x() - rhs.x(),
lhs.y() - rhs.y(),
lhs.z() - rhs.z(),
lhs.w() - rhs.w()
);
}
/// Quaternion product
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator*(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return make_Quaternion(
lhs.w() * rhs.x() + rhs.w() * lhs.x() + lhs.y() * rhs.z() - lhs.z() * rhs.y(),
lhs.w() * rhs.y() + rhs.w() * lhs.y() + lhs.z() * rhs.x() - lhs.x() * rhs.z(),
lhs.w() * rhs.z() + rhs.w() * lhs.z() + lhs.x() * rhs.y() - lhs.y() * rhs.x(),
lhs.w() * rhs.w() - lhs.x() * rhs.x() - lhs.y() * rhs.y() - lhs.z() * rhs.z()
);
}
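// Illustrative sketch (not part of the original file): verifies the Hamilton product convention
// implemented above on the basis elements; i * j yields k.
CUTLASS_HOST_DEVICE
Quaternion<float> example_quaternion_basis_product() {
  Quaternion<float> qi = make_Quaternion(1.0f, 0.0f, 0.0f, 0.0f);
  Quaternion<float> qj = make_Quaternion(0.0f, 1.0f, 0.0f, 0.0f);
  return qi * qj;  // equals (0, 0, 1, 0), i.e. the basis element k
}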
/// Quaternion division
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator/(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return lhs * reciprocal(rhs);
}
/// Quaternion scalar division
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator/(Element s, Quaternion<Element> const &q) {
return s * reciprocal(q);
}
/// Comparison
template <typename Element>
CUTLASS_HOST_DEVICE
bool operator<(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return true;
}
/// Rotates a 3-vector assuming this is a unit quaternion (a spinor). This avoids computing
/// a reciprocal.
template <typename Element>
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor_rotation(
Quaternion<Element> const &spinor, /// unit-length quaternion
Matrix3x1<Element> const &rhs) { /// arbitrary 3-vector
return (spinor * Quaternion<Element>(rhs, 0) * conj(spinor)).pure();
}
/// Inverse rotation of 3-vector assuming this is a unit quaternion (a spinor). This avoids computing
/// a reciprocal.
template <typename Element>
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor_rotation_inv(
Quaternion<Element> const &spinor, /// unit-length quaternion
Matrix3x1<Element> const &rhs) { /// arbitrary 3-vector
return (conj(spinor) * Quaternion<Element>(rhs, 0) * spinor).pure();
}
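// Illustrative sketch (not part of the original file): builds a unit quaternion for a 90-degree
// rotation about the z-axis and applies it with spinor_rotation; the x-axis basis vector maps to
// (approximately) the y-axis. The literal approximation of pi/2 is an assumption of this sketch.
CUTLASS_HOST_DEVICE
Matrix3x1<float> example_spinor_rotation() {
  Quaternion<float> q = Quaternion<float>::rotation(0.0f, 0.0f, 1.0f, 1.5707963f);
  return spinor_rotation(q, Matrix3x1<float>(1.0f, 0.0f, 0.0f));  // ~(0, 1, 0)
}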
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Quaternion-valued type.
template <typename T>
struct RealType< Quaternion<T> > {
using Type = T;
/// Number of elements
static int const kExtent = Quaternion<T>::kExtent;
CUTLASS_HOST_DEVICE
static Quaternion<T> from_real(double x) {
return Quaternion<T>(static_cast<T>(x));
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Factories
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
CUTLASS_HOST_DEVICE
cutlass::Quaternion<half_t> from_real<cutlass::Quaternion<half_t> >(double r) {
return cutlass::Quaternion<half_t>(half_t(r));
}
template <>
CUTLASS_HOST_DEVICE
cutlass::Quaternion<float> from_real<cutlass::Quaternion<float> >(double r) {
return cutlass::Quaternion<float>(float(r));
}
template <>
CUTLASS_HOST_DEVICE
cutlass::Quaternion<double> from_real<cutlass::Quaternion<double> >(double r) {
return cutlass::Quaternion<double>(r);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// functional.h numeric specializations
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct multiplies<Quaternion<T>> {
CUTLASS_HOST_DEVICE
Quaternion<T> operator()(Quaternion<T> lhs, Quaternion<T> const &rhs) const {
lhs = lhs * rhs;
return lhs;
}
};
/// Squares with optional conversion
template <typename T, typename Output>
struct magnitude_squared<Quaternion<T>, Output> {
CUTLASS_HOST_DEVICE
Output operator()(Quaternion<T> lhs) const {
multiplies<Output> mul_op;
Output y_w = Output(lhs.w());
Output y_x = Output(lhs.x());
Output y_y = Output(lhs.y());
Output y_z = Output(lhs.z());
return mul_op(y_w, y_w) + mul_op(y_x, y_x) + mul_op(y_y, y_y) + \
mul_op(y_z, y_z);
}
};
template <typename T>
struct multiply_add<Quaternion<T>, Quaternion<T>, Quaternion<T>> {
CUTLASS_HOST_DEVICE
Quaternion<T> operator()(
Quaternion<T> const &a,
Quaternion<T> const &b,
Quaternion<T> const &c) const {
T x = c.x();
T y = c.y();
T z = c.z();
T w = c.w();
x += a.w() * b.x();
x += b.w() * a.x();
x += a.y() * b.z();
    x += -a.z() * b.y();
y += a.w() * b.y();
y += b.w() * a.y();
y += a.z() * b.x();
y += -a.x() * b.z();
z += a.w() * b.z();
z += b.w() * a.z();
z += a.x() * b.y();
z += -a.y() * b.x();
w += a.w() * b.w();
w += -a.x() * b.x();
w += -a.y() * b.y();
w += -a.z() * b.z();
return cutlass::make_Quaternion(x, y, z, w);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/quaternion.h/0
|
{
"file_path": "cutlass/include/cutlass/quaternion.h",
"repo_id": "cutlass",
"token_count": 7898
}
| 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a canonical coordinate for rank=4 tensors offering named indices.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a canonical 4D coordinate used by tensor operations.
struct Tensor4DCoord : public Coord<4> {
/// Base class
using Base = Coord<4>;
/// Index type
using Index = typename Base::Index;
/// LongIndex type
using LongIndex = typename Base::LongIndex;
/// Batch dimension
static int const kN = 0;
/// Height dimension
static int const kH = 1;
/// Width dimension
static int const kW = 2;
/// Channels dimension
static int const kC = 3;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Tensor4DCoord() { }
/// Constructs from Coord<4>
CUTLASS_HOST_DEVICE
Tensor4DCoord(Coord<4> const &coord): Base(coord) { }
/// Helper to construct from N, H, W, and C.
CUTLASS_HOST_DEVICE
Tensor4DCoord(Index n, Index h, Index w, Index c): Base(make_Coord(n, h, w, c)) { }
/// Helper to construct from N, H, W, and C, which are LongIndex type
CUTLASS_HOST_DEVICE
Tensor4DCoord(LongIndex n, LongIndex h, LongIndex w, LongIndex c)
: Base(make_Coord(Index(n), Index(h), Index(w), Index(c))) { }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index const & n() const { return this->at(kN); }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index & n() { return this->at(kN); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index const & h() const { return this->at(kH); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index & h() { return this->at(kH); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index const & w() const { return this->at(kW); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index & w() { return this->at(kW); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index const & c() const { return this->at(kC); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index & c() { return this->at(kC); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
Tensor4DCoord operator+(Base const& b) const {
return Tensor4DCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
Tensor4DCoord operator-(Base const& b) const {
return Tensor4DCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
Tensor4DCoord operator*(Base const& b) const {
return Tensor4DCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
Tensor4DCoord operator/(Base const& b) const {
return Tensor4DCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
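////////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file): the element-wise operators inherited from
// Coord<4> together with the named accessors. The extent value below is hypothetical.
CUTLASS_HOST_DEVICE
Tensor4DCoord example_tensor4d_coord() {
  Tensor4DCoord extent(2, 8, 8, 16);                       // (N, H, W, C)
  Tensor4DCoord last = extent - Tensor4DCoord(1, 1, 1, 1);
  // Named accessors read the individual dimensions: last.n() == 1, last.c() == 15
  return last;                                             // (1, 7, 7, 15)
}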
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a canonical 5D coordinate used by tensor operations.
struct Tensor5DCoord : public Coord<5> {
/// Base class
using Base = Coord<5>;
/// Index type
using Index = typename Base::Index;
/// LongIndex type
using LongIndex = typename Base::LongIndex;
/// Batch dimension
static int const kN = 0;
/// Depth dimension
static int const kD = 1;
/// Height dimension
static int const kH = 2;
/// Width dimension
static int const kW = 3;
/// Channels dimension
static int const kC = 4;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Tensor5DCoord() { }
/// Constructs from Coord<5>
CUTLASS_HOST_DEVICE
Tensor5DCoord(Coord<5> const &coord): Base(coord) { }
/// Helper to construct from N, D, H, W, and C.
CUTLASS_HOST_DEVICE
Tensor5DCoord(Index n, Index d, Index h, Index w, Index c): Base(make_Coord(n, d, h, w, c)) { }
/// Helper to construct from N, D, H, W, and C, which are LongIndex type
CUTLASS_HOST_DEVICE
Tensor5DCoord(LongIndex n, LongIndex d, LongIndex h, LongIndex w, LongIndex c)
: Base(make_Coord(Index(n), Index(d), Index(h), Index(w), Index(c))) { }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index const & n() const { return this->at(kN); }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index & n() { return this->at(kN); }
  /// Returns the depth of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & d() const { return this->at(kD); }
  /// Returns the depth of the coordinate
  CUTLASS_HOST_DEVICE
  Index & d() { return this->at(kD); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index const & h() const { return this->at(kH); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index & h() { return this->at(kH); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index const & w() const { return this->at(kW); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index & w() { return this->at(kW); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index const & c() const { return this->at(kC); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index & c() { return this->at(kC); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
Tensor5DCoord operator+(Base const& b) const {
return Tensor5DCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
Tensor5DCoord operator-(Base const& b) const {
return Tensor5DCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
Tensor5DCoord operator*(Base const& b) const {
return Tensor5DCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
Tensor5DCoord operator/(Base const& b) const {
return Tensor5DCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
|
cutlass/include/cutlass/tensor_coord.h/0
|
{
"file_path": "cutlass/include/cutlass/tensor_coord.h",
"repo_id": "cutlass",
"token_count": 2990
}
| 41 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of scale and bias vectors.
This iterator uses masks to guard out-of-bounds accesses.
This can be used to load var and mean vectors in layernorm which is loop invariant.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorIterator
///
template <typename WarpShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorIterator for wgrad pitch-linear data.
///
template <typename WarpShape_, typename Element_>
class PredicatedScaleBiasVectorIterator<WarpShape_,
Element_,
layout::PitchLinear> {
public:
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 1;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
static int const kIterations = WarpShape::kContiguous / 8;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<__half2, 2 * kIterations * kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer to first access of tile
ConstPointer scale_pointer_;
ConstPointer bias_pointer_;
/// Size of tensor
int problem_size_;
int32_t thread_offset_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
/// Extent of tensor
int problem_size,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: problem_size_(problem_size),
scale_pointer_(scale_pointer),
bias_pointer_(bias_pointer) {
thread_offset_ = threadblock_offset.contiguous() + (thread_id % 32) / 4;
}
  /// Construct a PredicatedScaleBiasVectorIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
/// Extent of tensor
int problem_size,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
      /// Pointer to start of bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorIterator(problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Advances an iterator along logical dimensions of matrix in units of whole warp tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ += (WarpShape::kContiguous * tile_offset.contiguous());
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.fill(__float2half2_rn(0.0f));
__half2 *frag_ptr = reinterpret_cast<__half2 *>(&frag);
// load scale
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
__half,
sizeof(AccessType)
>(
frag_ptr[c * 2].x,
scale_pointer_ + thread_offset_ + c * 8,
(thread_offset_ + c * 8) < problem_size_
);
}
// load bias
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
__half,
sizeof(AccessType)
>(
frag_ptr[c * 2 + 1].x,
bias_pointer_ + thread_offset_ + c * 8,
(thread_offset_ + c * 8) < problem_size_
);
}
// duplicate scale
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
frag_ptr[c * 2].y = frag_ptr[c * 2].x;
}
// duplicate bias
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
frag_ptr[c * 2 + 1].y = frag_ptr[c * 2 + 1].x;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
};
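////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage sketch (not part of the original header). The __half scale/bias pointers,
// the problem size (length of the vectors), and the thread id are assumed to be supplied by the
// surrounding kernel; the warp shape below is hypothetical:
//
//   using Iterator = PredicatedScaleBiasVectorIterator<
//       cutlass::layout::PitchLinearShape<64, 8>, cutlass::half_t, cutlass::layout::PitchLinear>;
//
//   Iterator iter(problem_size, scale_ptr, bias_ptr, thread_id);
//   Iterator::Fragment frag;
//   iter.load(frag);   // frag packs duplicated {scale, bias} __half2 pairs; out-of-bounds
//                      // positions are zero-filled
//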
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename WarpShape_,
typename Element_>
class PredicatedScaleBiasVectorIterator<WarpShape_,
Element_,
layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorIterator<
layout::PitchLinearShape<WarpShape::kColumn, WarpShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
using Fragment = typename UnderlyingIterator::Fragment;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
///< Extent of tensor
int problem_size,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(problem_size, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
  /// Construct a PredicatedScaleBiasVectorIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
int problem_size, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorIterator(problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
iterator_.load(frag);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h/0
|
{
"file_path": "cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h",
"repo_id": "cutlass",
"token_count": 3844
}
| 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile
first, with the objective of minimizing predicate mask updates during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "regular_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for pitch-linear. This one is used by 2-stage SIMT kernels
/// and sparse tensor core meta data.
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess, kAlignment>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the contiguous or strided dimensions.");
private:
//
// Types
//
//
// Data members
//
/// Pointer to memory
uint8_t *pointer_;
/// Stride quantity
StrideIndex stride_;
/// Amount to increment pointer along strided dimension
Index increment_strided_;
/// Amount to advance pointer between tiles
Index increment_advance_;
public:
CUTLASS_DEVICE
RegularTileIterator(): pointer_(nullptr), increment_strided_(0), increment_advance_(0) { }
CUTLASS_DEVICE
RegularTileIterator(
TensorRef const &ref,
int thread_idx
):
pointer_(reinterpret_cast<uint8_t *>(ref.data()) + (ref.offset(ThreadMap::initial_offset(thread_idx)) * sizeof_bits<Element>::value / 8)) {
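    // The increments below are computed in bytes (via sizeof_bits) so that sub-byte
    // element types such as 4-bit integers advance the byte pointer correctly.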
stride_ = ref.stride()[0];
increment_strided_ = (ref.stride()[0] * sizeof_bits<Element>::value) * ThreadMap::Delta::kStrided / 8;
increment_advance_ =
(kAdvanceRank == 0 ?
Shape::kContiguous * sizeof_bits<Element>::value / 8 :
Shape::kStrided * (ref.stride()[0] * sizeof_bits<Element>::value / 8));
}
/// Loads a fragment
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
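    // pointer_offset is expressed in units of Element; convert it to a byte offset
    // before applying it to the byte-typed base pointer.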
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
uint8_t const *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int idx = c + s * ThreadMap::Iterations::kContiguous;
frag_ptr[idx] = access_ptr[c * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess];
}
if (s + 1 < ThreadMap::Iterations::kStrided) {
byte_pointer += increment_strided_;
}
}
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
load_with_pointer_offset(
frag,
tile_offset.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_
);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&frag);
uint8_t *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int idx = c + s * ThreadMap::Iterations::kContiguous;
access_ptr[c * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess] = frag_ptr[idx];
}
if (s + 1 < ThreadMap::Iterations::kStrided) {
byte_pointer += increment_strided_;
}
}
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
store_with_pointer_offset(
frag,
tile_offset.contiguous() * Shape::kContiguous + tile_offset.strided() * Shape::kStrided * stride_
);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
pointer_ += increment_advance_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator--() {
pointer_ -= increment_advance_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset;
}
/// Adds a tile offset in the unit of tile.
/// In GEMM/Conv implementation, this is used to move in the k dimension in the shared memory.
/// Below layouts are the shared memory layouts. Current SM50 SIMT kernels only use col major A and row major B.
/// For row major A operand, k dimension is contiguous dimension;
/// For col major A operand, k dimension is strided dimension;
/// For row major B operand, k dimension is strided dimension;
/// For col major B operand, k dimension is contiguous dimension.
/// Below two classes map col/row major to the pitch linear coordinates used
/// in this base class.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
int offset = sizeof_bits<Element>::value *
(coord.contiguous() * Shape::kContiguous + coord.strided() * Shape::kStrided * stride_) / 8;
add_pointer_offset(offset);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
#if 0
AccessType *access_ptr = pointer_[iteration_strided_ & 1];
int stride_idx = (iteration_strided_ & ~1);
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
#endif
return reinterpret_cast<AccessType *>(pointer_);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for row major
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
using Underlying = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
kAlignment
>;
using AccessType = typename Underlying::AccessType;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the row or column dimensions.");
private:
Underlying iterator_;
public:
CUTLASS_DEVICE
RegularTileIterator() { }
CUTLASS_DEVICE
RegularTileIterator(
TensorRef const &ref,
int thread_idx
):
iterator_({ref.data(), ref.stride()}, thread_idx) {
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
iterator_.load_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()});
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
iterator_.load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
iterator_.store_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()});
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
iterator_.store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator--() {
--iterator_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return iterator_.get();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for column major
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
using Underlying = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap
>;
using AccessType = typename Underlying::AccessType;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the row or column dimensions.");
private:
Underlying iterator_;
public:
CUTLASS_DEVICE
RegularTileIterator() { }
CUTLASS_DEVICE
RegularTileIterator(
TensorRef const &ref,
int thread_idx
):
iterator_({ref.data(), ref.stride()}, thread_idx) {
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
iterator_.load_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()});
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
iterator_.load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
iterator_.store_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()});
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
iterator_.store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator--() {
--iterator_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return iterator_.get();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
|
cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h/0
|
{
"file_path": "cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h",
"repo_id": "cutlass",
"token_count": 5517
}
| 43 |
# CuTe Tensors
This document describes `Tensor`, CuTe's core container that deploys the `Layout` concepts previously described.
Fundamentally, a `Tensor` represents a multidimensional array. A `Tensor` abstracts away the details of how the array's elements are organized and how they are stored. This lets users write algorithms that access multidimensional arrays generically and potentially specialize algorithms on a `Tensor`'s traits. For example, the rank of the `Tensor` can be dispatched against, the `Layout` of data can be inspected, and the type of data can be verified.
A `Tensor` is represented by two template parameters: `Engine` and `Layout`.
For a description of `Layout`, please refer to [the `Layout` section](./01_layout.md).
The `Tensor` presents the same shape and access operators as the `Layout` and uses the result of the `Layout` computation to
offset and dereference a random-access iterator held by the `Engine`.
That is, the layout of the data is provided by `Layout` and the actual data is provided by the iterator. Such data can live in any kind of memory -- global memory, shared memory, register memory -- or can even be transformed or generated on the fly.
## Fundamental operations
CuTe `Tensor` provides container-like operations for accessing elements.
* `.data()`. The iterator this `Tensor` holds.
* `.size()`. The total logical size of this `Tensor`.
* `.operator[](Coord)`. Access the element corresponding to the logical coordinate `Coord`.
* `.operator()(Coord)`. Access the element corresponding to the logical coordinate `Coord`.
* `.operator()(Coords...)`. Access the element corresponding to the logical coordinate `make_coord(Coords...)`.
CuTe `Tensor` provides a set of hierarchical operations similar to those of `Layout`.
* `rank<I...>(Tensor)`. The rank of the `I...`th mode of the `Tensor`.
* `depth<I...>(Tensor)`. The depth of the `I...`th mode of the `Tensor`.
* `shape<I...>(Tensor)`. The shape of the `I...`th mode of the `Tensor`.
* `size<I...>(Tensor)`. The size of the `I...`th mode of the `Tensor`.
* `layout<I...>(Tensor)`. The layout of the `I...`th mode of the `Tensor`.
* `tensor<I...>(Tensor)`. The subtensor corresponding to the `I...`th mode of the `Tensor` (a short example follows this list).
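As a minimal sketch of these queries (the pointer `A` and the shape here are placeholders, not taken from a specific CUTLASS example):
```c++
float* A = ...;
Tensor t = make_tensor(A, make_shape(Int<4>{}, make_shape(2, Int<8>{}))); // shape (_4,(2,_8))
auto r  = rank(t);         // 2   -- two top-level modes
auto d  = depth(t);        // 2   -- mode-1 is itself nested
auto s0 = shape<0>(t);     // _4
auto n1 = size<1>(t);      // 16  -- 2 * 8 elements in mode-1
auto ly = layout<1>(t);    // the Layout of mode-1
Tensor t1 = tensor<1>(t);  // the subtensor over mode-1 only
```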
## Tensor Engines
The `Engine` concept is a wrapper for an iterator or array of data.
It uses a stripped-down interface of `std::array` to present the iterator.
```c++
using iterator = // The iterator type
using value_type = // The iterator value-type
using reference = // The iterator reference-type
iterator begin() // The iterator
```
In general, users do not need to construct `Engine`s on their own. When a `Tensor` is constructed,
the appropriate engine -- often `ArrayEngine<T,N>`, `ViewEngine<Iter>`, or
`ConstViewEngine<Iter>` -- will be constructed.
### Tagged Iterators
Any random-access iterator can be used to construct a `Tensor`, but
users can also "tag" any iterator with a memory space --
e.g., to indicate this iterator is accessing global memory or shared memory.
This is done by calling `make_gmem_ptr(g)` or `make_gmem_ptr<T>(g)` to tag `g` as a global memory iterator,
and `make_smem_ptr(s)` or `make_smem_ptr<T>(s)` to tag `s` as a shared memory iterator.
Tagging memory makes it possible for CuTe's `Tensor` algorithms
to use the fastest implementation for the specific kind(s) of memory.
When calling very specific operations with `Tensor`s, it also allows those
operators to verify the tags against what is expected.
For example, some kinds of optimized copy operations require
the source of the copy to be global memory
and the destination of the copy to be shared memory.
Tagging makes it possible for CuTe to dispatch
to those copy operations and/or verify against those copy operations.
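As a small sketch of tagging (the pointers are placeholders):
```c++
float* g = ...;                     // e.g., a global-memory allocation
__shared__ float s[128];
auto g_tagged = make_gmem_ptr(g);   // iterator tagged as global memory
auto s_tagged = make_smem_ptr(s);   // iterator tagged as shared memory
```
The tagged iterators can then be passed to `make_tensor` exactly like raw pointers, as shown in the next section.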
## Tensor Creation
`Tensor`s can be constructed as owning or nonowning.
"Owning" `Tensor`s behave like `std::array`.
When you copy the `Tensor`, you (deep-)copy its elements,
and the `Tensor`'s destructor deallocates the array of elements.
"Nonowning" `Tensor`'s behave like a (raw) pointer.
Copying the `Tensor` doesn't copy the elements,
and destroying the `Tensor` doesn't deallocate the array of elements.
This has implications for developers of generic `Tensor` algorithms.
For example, input `Tensor` parameters of a function
should be passed by reference or const reference,
because passing a `Tensor` by value
may or may not make a deep copy of the `Tensor`'s elements.
### Nonowning Tensors
A `Tensor` is usually a nonowning view of existing memory.
Nonowning `Tensor`s are created by calling `make_tensor`
with two arguments: a random-access iterator, and the `Layout` or arguments to construct a `Layout`.
Here are some examples of creating `Tensor`s
that are nonowning views of existing memory.
```cpp
float* A = ...;
// Untagged pointers
Tensor tensor_8 = make_tensor(A, make_layout(Int<8>{})); // Construct with Layout
Tensor tensor_8s = make_tensor(A, Int<8>{}); // Construct with Shape
Tensor tensor_8d2 = make_tensor(A, 8, 2); // Construct with Shape and Stride
// Global memory (static or dynamic layouts)
Tensor gmem_8s = make_tensor(make_gmem_ptr(A), Int<8>{});
Tensor gmem_8d = make_tensor(make_gmem_ptr(A), 8);
Tensor gmem_8sx16d = make_tensor(make_gmem_ptr(A), make_shape(Int<8>{},16));
Tensor gmem_8dx16s = make_tensor(make_gmem_ptr(A), make_shape ( 8 ,Int<16>{}),
make_stride(Int<16>{},Int< 1>{}));
// Shared memory (static or dynamic layouts)
Layout smem_layout = make_layout(make_shape(Int<4>{},Int<8>{}));
__shared__ float smem[decltype(cosize(smem_layout))::value]; // (static-only allocation)
Tensor smem_4x8_col = make_tensor(make_smem_ptr(smem), smem_layout);
Tensor smem_4x8_row = make_tensor(make_smem_ptr(smem), shape(smem_layout), LayoutRight{});
```
As shown, users wrap the pointer by identifying its memory space:
e.g., global memory (via `make_gmem_ptr` or `make_gmem_ptr<T>`) or shared memory (via `make_smem_ptr` or `make_smem_ptr<T>`).
`Tensor`s that view existing memory can have either static or dynamic `Layout`s.
Calling `print` on all of the above tensors displays
```
tensor_8 : ptr[32b](0x7f42efc00000) o _8:_1
tensor_8s : ptr[32b](0x7f42efc00000) o _8:_1
tensor_8d2 : ptr[32b](0x7f42efc00000) o 8:2
gmem_8s : gmem_ptr[32b](0x7f42efc00000) o _8:_1
gmem_8d : gmem_ptr[32b](0x7f42efc00000) o 8:_1
gmem_8sx16d : gmem_ptr[32b](0x7f42efc00000) o (_8,16):(_1,_8)
gmem_8dx16s : gmem_ptr[32b](0x7f42efc00000) o (8,_16):(_16,_1)
smem_4x8_col : smem_ptr[32b](0x7f4316000000) o (_4,_8):(_1,_4)
smem_4x8_row : smem_ptr[32b](0x7f4316000000) o (_4,_8):(_8,_1)
```
which displays the pointer type along with any memory space tags, the pointer's `value_type` width, the raw pointer address, and the associated `Layout`.
### Owning Tensors
A `Tensor` can also be an owning array of memory.
Owning `Tensor`s are created by calling `make_tensor<T>`,
where `T` is the type of each element of the array, and
a `Layout` or arguments to construct a `Layout`.
The array is allocated analogously to `std::array<T,N>` and, therefore, owning `Tensor`s must be constructed with a `Layout` that has static shapes and static strides.
CuTe does not perform dynamic memory allocation in `Tensor`s as it is not a common or performant operation within CUDA kernels.
Here are some examples of creating owning `Tensor`s.
```c++
// Register memory (static layouts only)
Tensor rmem_4x8_col = make_tensor<float>(Shape<_4,_8>{});
Tensor rmem_4x8_row = make_tensor<float>(Shape<_4,_8>{},
LayoutRight{});
Tensor rmem_4x8_pad = make_tensor<float>(Shape <_4, _8>{},
Stride<_32,_2>{});
Tensor rmem_4x8_like = make_tensor_like(rmem_4x8_pad);
```
The `make_tensor_like` function makes an owning Tensor of register memory with the same value type and shape as its input `Tensor` argument and attempts to use the same order of strides as well.
Calling `print` on each of the above tensors produces similar output
```
rmem_4x8_col : ptr[32b](0x7ff1c8fff820) o (_4,_8):(_1,_4)
rmem_4x8_row : ptr[32b](0x7ff1c8fff8a0) o (_4,_8):(_8,_1)
rmem_4x8_pad : ptr[32b](0x7ff1c8fff920) o (_4,_8):(_32,_2)
rmem_4x8_like : ptr[32b](0x7f4158fffc60) o (_4,_8):(_8,_1)
```
and we can see that each pointer address is unique indicating that each `Tensor` is a unique array-like allocation.
## Accessing a Tensor
Users can access the elements of a `Tensor` via `operator()` and `operator[]`,
which take `IntTuple`s of logical coordinates.
When users access a `Tensor`,
the `Tensor` uses its `Layout` to map the logical coordinate
to an offset that can be accessed by the iterator.
You can see this in `Tensor`'s implementation of `operator[]`.
```c++
template <class Coord>
decltype(auto) operator[](Coord const& coord) {
return data()[layout()(coord)];
}
```
For example, we can read and write to `Tensor`s using natural coordinates, using the variadic `operator()`, or the container-like `operator[]`.
```c++
Tensor A = make_tensor<float>(Shape <Shape < _4,_5>,Int<13>>{},
Stride<Stride<_12,_1>,_64>{});
float* b_ptr = ...;
Tensor B = make_tensor(b_ptr, make_shape(13, 20));
// Fill A via natural coordinates op[]
for (int m0 = 0; m0 < size<0,0>(A); ++m0)
for (int m1 = 0; m1 < size<0,1>(A); ++m1)
for (int n = 0; n < size<1>(A); ++n)
A[make_coord(make_coord(m0,m1),n)] = n + 2 * m0;
// Transpose A into B using variadic op()
for (int m = 0; m < size<0>(A); ++m)
for (int n = 0; n < size<1>(A); ++n)
B(n,m) = A(m,n);
// Copy B to A as if they are arrays
for (int i = 0; i < A.size(); ++i)
A[i] = B[i];
```
## Tiling a Tensor
Many of the [`Layout` algebra operations](https://github.com/NVIDIA/cutlass/blob/main/media/docs/cute/02_layout_algebra.md) can also be applied to `Tensor`.
```cpp
composition(Tensor, Tiler)
logical_divide(Tensor, Tiler)
zipped_divide(Tensor, Tiler)
tiled_divide(Tensor, Tiler)
flat_divide(Tensor, Tiler)
```
The above operations allow arbitrary subtensors to be "factored out" of `Tensor`s. This is very commonly used in tiling for threadgroups, tiling for MMAs, and reordering tiles of data for threads.
Note that the `_product` operations are not implemented for `Tensor`s as those would
often produce layouts with increased codomain sizes, which means the `Tensor` would
require accessing elements unpredictably far outside its previous bounds. `Layout`s can be
used in products, but not `Tensor`s.
## Slicing a Tensor
Whereas accessing a `Tensor` with a coordinate will return an element of that tensor,
slicing a `Tensor` will return a subtensor of all the elements in the sliced mode(s).
Slices are performed through the same `operator()`
that is used for accessing an individual element.
Passing in `_` (the underscore character, an instance of the `cute::Underscore` type)
has the same effect as `:` (the colon character) in Fortran or Matlab:
retain that mode of the tensor as if no coordinate had been used.
Slicing a tensor performs two operations,
* the `Layout` is evaluated on the partial coordinate and the resulting offset is accumulated into the iterator -- the new iterator points to the start of the new tensor.
* the `Layout` modes corresponding to `_`-elements of the coordinate are used to construct a new layout.
Together, the new iterator and the new layout construct the new tensor.
```cpp
// ((_3,2),(2,_5,_2)):((4,1),(_2,13,100))
Tensor A = make_tensor(ptr, make_shape (make_shape (Int<3>{},2), make_shape ( 2,Int<5>{},Int<2>{})),
make_stride(make_stride( 4,1), make_stride(Int<2>{}, 13, 100)));
// ((2,_5,_2)):((_2,13,100))
Tensor B = A(2,_);
// ((_3,_2)):((4,1))
Tensor C = A(_,5);
// (_3,2):(4,1)
Tensor D = A(make_coord(_,_),5);
// (_3,_5):(4,13)
Tensor E = A(make_coord(_,1),make_coord(0,_,1));
// (2,2,_2):(1,_2,100)
Tensor F = A(make_coord(2,_),make_coord(_,3,_));
```
<p align="center">
<img src="../../images/cute/slice.png" alt="slice.png" height="300"/>
</p>
In the image above, a `Tensor` is sliced in various ways and the subtensors generated by those slices are highlighted within the original tensor. Note that tensors `C` and `D` contain the same elements, but have different ranks and shapes due to the use of `_` versus the use of `make_coord(_,_)`. In each case, the rank of the result is equal to the number of `Underscore`s in the slicing coordinate.
## Partitioning a Tensor
To implement generic partitioning of a `Tensor`, we apply composition or tiling followed by slicing. This can be performed in many ways, but three are particularly useful: inner-partitioning, outer-partitioning, and TV-layout-partitioning.
### Inner and outer partitioning
Let's take a tiled example and look at how we can slice it in useful ways.
```cpp
Tensor A = make_tensor(ptr, make_shape(8,24)); // (8,24)
auto tiler = Shape<_4,_8>{}; // (_4,_8)
Tensor tiled_a = zipped_divide(A, tiler); // ((_4,_8),(2,3))
```
Suppose that we want to give each threadgroup one of these 4x8 tiles of data. Then we can use our threadgroup coordinate to index into the second mode.
```cpp
Tensor cta_a = tiled_a(make_coord(_,_), make_coord(blockIdx.x, blockIdx.y)); // (_4,_8)
```
We call this an *inner-partition* because it keeps the inner "tile" mode. This pattern of applying a tiler and then slicing out that tile by indexing into the remainder mode is common and has been wrapped into its own function `inner_partition(Tensor, Tiler, Coord)`. You'll often see `local_tile(Tensor, Tiler, Coord)` which is just another name for `inner_partition`. The `local_tile` partitioner is very often applied at the threadgroup level to partition tensors into tiles across threadgroups.
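As a brief sketch of this equivalence (using the `A` and `tiler` defined in the snippet above):
```cpp
// Both forms produce the same (_4,_8) tile of A for this threadgroup.
Tensor cta_tile_1 = zipped_divide(A, tiler)(make_coord(_,_), make_coord(blockIdx.x, blockIdx.y));
Tensor cta_tile_2 = local_tile(A, tiler, make_coord(blockIdx.x, blockIdx.y));
```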
Alternatively, suppose that we have 32 threads and want to give each thread one element of these 4x8 tiles of data. Then we can use our thread to index into the first mode.
```cpp
Tensor thr_a = tiled_a(threadIdx.x, make_coord(_,_)); // (2,3)
```
We call this an *outer-partition* because it keeps the outer "rest" mode. This pattern of applying a tiler and then slicing into that tile by indexing into the tile mode is common and has been wrapped into its own function `outer_partition(Tensor, Tiler, Coord)`. Sometimes you'll see `local_partition(Tensor, Layout, Idx)`, which is a rank-sensitive wrapper around `outer_partition` that transforms the `Idx` into a `Coord` using the inverse of the `Layout` and then constructs a `Tiler` with the same top-level shape as the `Layout`. This allows the user to ask for a row-major, column-major, or arbitrary layout of threads with a given shape that can be used to partition a tensor.
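As a short sketch (the column-major 4x8 arrangement of the 32 threads below is an assumption chosen for illustration):
```cpp
// Each of the 32 threads slices into every 4x8 tile of A and keeps the "rest" mode.
auto thr_layout = Layout<Shape<_4,_8>>{};                      // 32 threads, column-major
Tensor thr_a    = local_partition(A, thr_layout, threadIdx.x); // (2,3) for the 8x24 A above
```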
To see how these partitioning patterns are used, see the [introductory GEMM tutorial](./0x_gemm_tutorial.md).
### Thread-Value partitioning
Another common partitioning strategy is called a thread-value partitioning. In this pattern, we construct a `Layout` that represents the mapping of all threads (or any parallel agent) and all values that each thread will receive to coordinates of the target data. With `composition` the target data layout is transformed according to our TV-layout and then we can simply slice into the thread-mode of the result with our thread index.
```cpp
// Construct a TV-layout that maps 8 thread indices and 4 value indices
// to 1D coordinates within a 4x8 tensor
// (T8,V4) -> (M4,N8)
auto tv_layout = Layout<Shape <Shape <_2,_4>,Shape <_2, _2>>,
Stride<Stride<_8,_1>,Stride<_4,_16>>>{}; // (8,4)
// Construct a 4x8 tensor with any layout
Tensor A = make_tensor<float>(Shape<_4,_8>{}, LayoutRight{}); // (4,8)
// Compose A with the tv_layout to transform its shape and order
Tensor tv = composition(A, tv_layout); // (8,4)
// Slice so each thread has 4 values in the shape and order that the tv_layout prescribes
Tensor v = tv(threadIdx.x, _); // (4)
```
<p align="center">
<img src="../../images/cute/tv_layout.png" alt="tv_layout.png" height="300"/>
</p>
The image above is a visual representation of the preceding code. An arbitrary 4x8 layout of data is composed with a specific 8x4 TV-layout that represents a partitioning pattern. The result of the composition is on the right, where each thread's values are arranged across a row. The bottom layout depicts the inverse TV-layout, which shows, for each 4x8 logical coordinate, the thread id and value id it maps to.
To see how these partitioning patterns are constructed and used, see the [tutorial on building MMA Traits](./0t_mma_atom.md).
## Examples
### Copy a subtile from global memory to registers
The following example copies rows of a matrix (with any `Layout`)
from global memory to register memory,
then executes some algorithm `do_something`
on the row that lives in register memory.
```c++
Tensor gmem = make_tensor(ptr, make_shape(Int<8>{}, 16)); // (_8,16)
Tensor rmem = make_tensor_like(gmem(_, 0)); // (_8)
for (int j = 0; j < size<1>(gmem); ++j) {
copy(gmem(_, j), rmem);
do_something(rmem);
}
```
This code does not need to know anything about the `Layout` of `gmem`
other than that it is rank-2 and that the first mode has a static size.
The following code checks both of those conditions at compile time.
```c++
CUTE_STATIC_ASSERT_V(rank(gmem) == Int<2>{});
CUTE_STATIC_ASSERT_V(is_static<decltype(shape<0>(gmem))>{});
```
Extending this example using the tiling utilities detailed in [the `Layout` algebra section](./02_layout_algebra.md), we can copy an arbitrary subtile of a tensor using almost the same code as above.
```c++
Tensor gmem = make_tensor(ptr, make_shape(24, 16)); // (24,16)
auto tiler = Shape<_8,_4>{}; // 8x4 tiler
//auto tiler = Tile<Layout<_8,_3>, Layout<_4,_2>>{}; // 8x4 tiler with stride-3 and stride-2
Tensor gmem_tiled = zipped_divide(gmem, tiler); // ((_8,_4),Rest)
Tensor rmem = make_tensor_like(gmem_tiled(_, 0)); // ((_8,_4))
for (int j = 0; j < size<1>(gmem_tiled); ++j) {
copy(gmem_tiled(_, j), rmem);
do_something(rmem);
}
```
This applies a statically shaped `Tiler` to the global memory `Tensor`, creates a register `Tensor` that is compatible with the shape of that tile, then loops through each tile to copy it into register memory and `do_something`.
## Summary
* `Tensor` is defined as an `Engine` and a `Layout`.
* `Engine` is an iterator that can be offset and dereferenced.
* `Layout` defines the logical domain of the tensor and maps coordinates to offsets.
* Tile a `Tensor` using the same methods for tiling `Layout`s.
* Slice a `Tensor` to retrieve subtensors.
* Partitioning is tiling and/or composition followed by slicing.
|
cutlass/media/docs/cute/03_tensor.md/0
|
{
"file_path": "cutlass/media/docs/cute/03_tensor.md",
"repo_id": "cutlass",
"token_count": 6537
}
| 44 |

[README](../../README.md#documentation) > **Layouts and Tensors**
Note: This document talks about CUTLASS 2.x layout tag types.
CUTLASS 3.0 deprecates all legacy 2.x layout tags in favour of a single `cute::Layout<Shape, Stride>`
vocabulary type for all thread and data tensors. Please refer to the
[documentation for cute layouts](cute/01_layout.md) for more details about CUTLASS 3.0's definition of "layout".
# Layouts and Tensors
_Tensors_ are mathematical objects represented by a multidimensional array of numeric elements in memory.
These may define two dimensional matrices upon which classical linear algebra computations may be defined or
higher dimensional objects frequently used to structure data used by Deep Learning applications and frameworks.
This document describes design patterns used in CUTLASS to map logical index spaces onto memory (Layouts) and to
indirectly reference tensors in memory (TensorRef and TensorView objects).
Throughout, CUTLASS adheres to the following terminology, which is consistent with the C++ Standard Library.
* *size* (scalar): number of elements in a tensor
* *capacity* (scalar): number of elements needed to represent tensor in memory (may be larger than _size_)
* *rank* (scalar): number of logical dimensions describing tensor
* *extent* (vector): size of each logical dimension in a tensor
## CUTLASS Layout Concept
CUTLASS Layouts are a systematic design pattern for the following:
* Mapping _logical_ index space to _physical_ offsets in memory
* Storing the dynamic state needed in the above computation
* Defining a type system for partial specialization of other CUTLASS components
_Concept:_ layouts satisfy the following concept.
```c++
/// CUTLASS Layout concept example
struct LayoutConcept {
/// Logical rank of tensor
static int const kRank;
/// Rank of stride vector
static int const kStrideRank;
/// Index type used for coordinates
struct Index;
/// Long index type used for offsets
struct LongIndex;
/// Logical coordinate - satisfies Coord<kRank, ..>
struct TensorCoord;
/// Stride object - satisfies Coord<kStrideRank, ..>
  struct Stride;
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
LayoutConcept();
/// Ctor
CUTLASS_HOST_DEVICE
LayoutConcept(Stride stride);
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static LayoutConcept packed(TensorCoord const &extent);
/// Function call operator returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const;
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const;
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const;
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride();
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const;
};
```
_Layout_ objects generalize leading dimensions of matrices typical in _BLAS_ implementations. For example, cuBLAS assumes
Fortran-style _column-major_ layouts of matrices and refers to this as the matrix's "leading dimension."
```c++
cublasGemmEx(
...
ptr_A, // pointer to first element of matrix A
lda, // leading dimension
...
);
```
This implies an element at coordinate (_row_, _column_) has offset `row + lda * column`.
This is equivalently represented by CUTLASS's `layout::ColumnMajor` type as follows.
```c++
layout::ColumnMajor layout(lda);
int offset = layout({row, column}); // returns row + lda * column
```
Other layout functions are possible such as row-major:
```c++
layout::RowMajor layout(lda);
int offset = layout({row, column}); // returns lda * row + column
```
In both cases, the _logical_ coordinate (_row_, _column_) is represented by the same object. This enables an algorithm to be
implemented as generic template, with locations within tensors always specified in logical space. _Layout_ objects map this to
physical offsets in memory.
The layout's `::packed()` static method may be used to construct a layout object given the extent of a densely packed tensor.
This method is needed when an algorithm must define a buffer of arbitrary layout.
Example:
```c++
typename ArbitraryLayout::TensorCoord extent = make_Coord(...);
typename ArbitraryLayout::TensorCoord coord;
ArbitraryLayout layout = ArbitraryLayout::packed(extent);
int offset = layout({coord});
```
The layout's `::capacity()` method computes the number of locations in memory needed to represent a tensor. This is
useful when allocating memory, as more storage may be needed than what is strictly necessary for a fully packed
tensor.
Example:
```c++
int lda = columns + padding;
MatrixCoord extent{rows, columns};
layout::RowMajor layout(lda);
auto capacity = layout.capacity(extent); // returns rows * (columns + padding)
```
## Accessing elements within a tensor
### TensorRef
`TensorRef<class T, class Layout>` is a structure containing both a pointer to the start of a
tensor and a layout object to access its elements. This is a convenient object which may be
passed to functions to limit an explosion of arguments when a layout's stride has many
elements.
Example:
```c++
int4_t *ptr = ...;
int ldm = ...;
int row = ...;
int column = ...;
layout::ColumnMajor layout(ldm);
TensorRef<int4_t, layout::ColumnMajor> ref(ptr, layout);
int4_t x = ref.at({row, column}); // loads a 4-bit signed integer from the tensor
ref.at({row, column}) = x * 2_s4; // transforms this quantity and stores it back
```
### TensorView
Matrices and tensors used in linear algebra computations are invariably finite. `TensorView<class T, class Layout>` extends `TensorRef<>` by
adding an `extent` vector to describe the logical extent of the tensor or matrix.
Example:
```c++
int4_t *ptr = ...;
int ldm = ...;
MatrixCoord extent = ...;
int row = ...;
int column = ...;
layout::ColumnMajor layout(ldm);
TensorView<int4_t, layout::ColumnMajor> view(ptr, layout, extent);
MatrixCoord coord = {row, column};
if (view.contains(coord)) { // verify coordinate is in bounds before performing access
    int4_t x = view.at(coord);
    view.at({row, column}) = x * 2_s4;
}
```
A `TensorView<>` may be constructed from a `TensorRef<>` succinctly as follows:
```c++
layout::ColumnMajor layout(ldm);
TensorRef<int4_t, layout::ColumnMajor> ref(ptr, layout);
TensorView<int4_t, layout::ColumnMajor> view(ref, extent); // construct TensorView from TensorRef and extent
```
Note, computations avoid becoming overdetermined by accepting a single problem size component
and `TensorRef` objects for each of the operands whose extents are implied as a precondition of the operation. By avoiding
redundant storage of extent quantities, CUTLASS minimizes capacity utilization of precious resources such as constant memory.
This is consistent with BLAS conventions.
# Summary:
The design patterns described in this document form a hierarchy:
* `T *ptr;` is a pointer to a contiguous sequence of elements of type `T`
* `Layout layout;` is an object mapping an index space to a linear offset
* `TensorRef<T, Layout> ref(ptr, layout);` is an object pointing to an _unbounded_ tensor containing elements of type `T` and a layout of type `Layout`
* `TensorView<T, Layout> view(ref, extent);` is an object pointing to a _bounded_ tensor containing elements of type `T` and a layout of type `Layout` (a short sketch combining these levels follows)
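The following sketch combines the four levels above (the pointer, leading dimension, and extent are placeholders):
```c++
float *ptr = ...;
int lda = ...;
MatrixCoord extent = ...;
layout::ColumnMajor layout(lda);                          // maps (row, column) to row + lda * column
TensorRef<float, layout::ColumnMajor> ref(ptr, layout);   // unbounded tensor
TensorView<float, layout::ColumnMajor> view(ref, extent); // bounded tensor
```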
# Appendix: Existing Layouts
This section enumerates several existing Layout types defined in CUTLASS.
Matrix layouts:
- `PitchLinear`: data layout defined by _contiguous_ and _strided_ dimensions. _contiguous_ refers to consecutive elements in memory, whereas _strided_ refers to data separated by a uniform stride (a short mapping sketch follows this list)
  - Rank: 2
  - TensorCoord type: `PitchLinearCoord`
  - Shape type: `PitchLinearShape`
  - Stride rank: 1
- `ColumnMajor`: data layout defined by _rows_ and _columns_ dimensions. Can be mapped to `PitchLinear` by: (_contiguous_ = _rows_, _strided_ = _columns_)
  - Rank: 2
  - TensorCoord type: `MatrixCoord`
  - Shape type: `MatrixShape`
  - Stride rank: 1
- `RowMajor`: data layout defined by _rows_ and _columns_ dimensions. Can be mapped to `PitchLinear` by: (_contiguous_ = _columns_, _strided_ = _rows_)
  - Rank: 2
  - TensorCoord type: `MatrixCoord`
  - Shape type: `MatrixShape`
  - Stride rank: 1
- `ColumnMajorInterleaved<k>`: data layout defined by _rows_ and _columns_ dimensions. Data is packed into a 'column-major' arrangement of row vectors of fixed length.
  - Rank: 2
  - TensorCoord type: `MatrixCoord`
  - Shape type: `MatrixShape`
  - Stride rank: 1
- `RowMajorInterleaved<k>`: data layout defined by _rows_ and _columns_ dimensions. Data is packed into a 'row-major' arrangement of column vectors of fixed length.
  - Rank: 2
  - TensorCoord type: `MatrixCoord`
  - Shape type: `MatrixShape`
  - Stride rank: 1
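As a minimal sketch of the pitch-linear mapping referenced above (the stride and coordinates are placeholders):
```c++
int ld = ...;
int row = ...;
int column = ...;
layout::PitchLinear pitch_linear(ld);   // offset = contiguous + ld * strided
layout::ColumnMajor col_major(ld);      // (contiguous = row, strided = column)
// Both produce the offset row + ld * column
auto offset_pl = pitch_linear({row, column});
auto offset_cm = col_major({row, column});
```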
Tensor layouts:
- `TensorNHWC`: data layout for 4-D activation tensors with extent (N, H, W, C), where the channel dimension _C_ is contiguous in memory
Permuted Shared Memory Layouts:
- `TensorOpCongruous<ElementSize>`
- `TensorOpCrosswise<ElementSize>`
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
|
cutlass/media/docs/layout.md/0
|
{
"file_path": "cutlass/media/docs/layout.md",
"repo_id": "cutlass",
"token_count": 3206
}
| 45 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
from typing import Union
from cuda import cuda
from cutlass_library import SubstituteTemplate
import numpy as np
from cutlass_library import (
ConvKindNames,
ConvKindTag,
DataTypeNames,
DataTypeSize,
DataTypeTag,
IteratorAlgorithmNames,
IteratorAlgorithmTag,
LayoutTag,
LayoutType,
MathOperation,
MathOperationTag,
OpcodeClass,
OpcodeClassNames,
OpcodeClassTag,
OperationKind,
ShortDataTypeNames,
ShortLayoutTypeNames,
SplitKMode,
StrideSupport,
StrideSupportTag,
SwizzlingFunctor,
SwizzlingFunctorTag,
get_complex_from_real,
)
from cutlass.backend.arguments import ArgumentBase
from cutlass.backend.c_types import dim3_, get_conv2d_arguments
from cutlass.backend.library import (
EmissionType,
TensorDescription,
TileDescription,
)
from cutlass.backend.memory_manager import device_mem_alloc
from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration
from cutlass.backend.utils.device import to_device_ptr
from cutlass.shape import GemmCoord
class Conv2dArguments(ArgumentBase):
"""
    Argument wrapper for Conv2d. It encodes problem information and
    user-provided tensors into the kernel's arguments.
:param operation: the Conv2d operation to take the argument
:type operation: :class:`cutlass.backend.Conv2dOperation`
:param problem_size: the Conv2d problem size
:type problem_size: :class:`cutlass.shape.Conv2dProblemSize`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param split_k_mode: conv2d split K mode, defaults to cutlass_library.library.SplitKMode.Serial
:type split_k_mode: cutlass_library.library.SplitKMode, optional
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
"""
def __init__(self, operation, problem_size, A, B, C, D,
split_k_mode=SplitKMode.Serial, **kwargs, ) -> None:
self.operation = operation
self.conv_kind = operation.conv_kind
self.layout_A = operation.A.layout
self.layout_B = operation.B.layout
self.layout_C = operation.C.layout
self.element_A = operation.A.element
self.element_B = operation.B.element
self.element_C = operation.C.element
if self.layout_C == LayoutType.TensorNC32HW32:
raise Exception("Layout type TensorNC32HW32 is not currently supported")
super().__init__(A, B, C, D, **kwargs)
if "split_k_slices" in kwargs.keys() and kwargs["split_k_slices"] > 1:
self.split_k_mode = split_k_mode
self.split_k_slices = kwargs["split_k_slices"]
else:
self.split_k_mode = SplitKMode.Serial
self.split_k_slices = 1
if "output_op" in kwargs.keys() and self.split_k_mode != SplitKMode.Parallel:
self.output_op = kwargs["output_op"]
else:
self.output_op = self.operation.epilogue_type(1.0, 0.0)
self.problem_size = problem_size
self.problem_size.split_k_slices = self.split_k_slices
self.initialize()
def get_arguments(self):
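        # tc_numel = -1 signals that tensor C has the full output extent. When it is
        # non-negative, equals C's channel extent, and is smaller than the full C size,
        # construct_arguments in Conv2dRT's HostTemplate treats C as a bias vector.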
tc_numel = -1
if hasattr(self, "tensor_c_numel"):
tc_numel = self.tensor_c_numel
self.c_arguments = self.operation.argument_type(
int(self.conv_kind),
self.problem_size.ctype,
int(to_device_ptr(self.ptr_A)),
int(to_device_ptr(self.ptr_B)),
int(to_device_ptr(self.ptr_C)),
int(to_device_ptr(self.ptr_D)),
tc_numel,
self.output_op,
int(self.split_k_mode)
)
def initialize(self):
self.launch_config = self.operation.rt_module.plan(self)
self.get_arguments()
# Allocate and initialize device workspace
device_workspace_size = self.operation.rt_module.get_workspace_size(self.c_arguments)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
self.semaphore = 0
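        # For parallel split-K, the workspace holds the partial outputs, so D is
        # redirected to it; for serial split-K, the workspace acts as a semaphore
        # used to synchronize threadblocks.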
if workspace_ptr is not None and self.split_k_mode == SplitKMode.Parallel:
self.ptr_D = workspace_ptr
# Reset arguments now that ptr_D has been updated
self.get_arguments()
elif workspace_ptr is not None and self.split_k_mode == SplitKMode.Serial:
self.semaphore = workspace_ptr
params_ = self.operation.rt_module.get_args(
self.c_arguments, ctypes.c_void_p(int(self.semaphore)))
self.host_workspace = bytearray(params_.contents)
self.device_workspace = None
def sync(self):
"""
        Synchronize the arguments. If the tensors were provided as host arrays,
        copy the results back from device to host.
"""
return super().sync()
class Conv2dRT(ExecutableOperation):
"""
Conv2dRT manages the CUTLASS runtime components
"""
KernelTemplate = r"""
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
using ElementA = typename ${operation_name}_base::ElementA;
using ElementB = typename ${operation_name}_base::ElementB;
using ElementC = typename ${operation_name}_base::ElementC;
using LayoutA = typename ${operation_name}_base::LayoutA;
using LayoutB = typename ${operation_name}_base::LayoutB;
using LayoutC = typename ${operation_name}_base::LayoutC;
using EpilogueOutputOp = typename ${operation_name}_base::EpilogueOutputOp;
struct ${operation_name}_TemporaryArgs {
int conv_kind;
cutlass::conv::Conv2dProblemSize problem_size;
ElementA* ptr_A;
ElementB* ptr_B;
ElementC* ptr_C;
ElementC* ptr_D;
int tensor_c_numel;
typename EpilogueOutputOp::Params epilogue_params;
int split_k_mode;
};
typename ${operation_name}${operation_suffix}::Arguments
construct_arguments(${operation_name}_TemporaryArgs args) {
cutlass::conv::Operator conv_operator = static_cast<cutlass::conv::Operator>(args.conv_kind);
auto tc_A = cutlass::conv::implicit_gemm_tensor_a_extent(conv_operator, args.problem_size);
auto tc_B = cutlass::conv::implicit_gemm_tensor_b_extent(conv_operator, args.problem_size);
auto tc_C = cutlass::conv::implicit_gemm_tensor_c_extent(conv_operator, args.problem_size);
auto tc_D = cutlass::conv::implicit_gemm_tensor_c_extent(conv_operator, args.problem_size);
auto size_C = tc_C.at(0) * tc_C.at(1) * tc_C.at(2) * tc_C.at(3);
if (args.tensor_c_numel >= 0 && args.tensor_c_numel == tc_C.at(3) && args.tensor_c_numel < size_C) {
// C is interpreted as bias
tc_C = {0, 0, 0, 0};
}
cutlass::TensorRef<ElementA, LayoutA> tref_A(args.ptr_A, LayoutA::packed(tc_A));
    cutlass::TensorRef<ElementB, LayoutB> tref_B(args.ptr_B, LayoutB::packed(tc_B));
    cutlass::TensorRef<ElementC, LayoutC> tref_C(args.ptr_C, LayoutC::packed(tc_C));
    cutlass::TensorRef<ElementC, LayoutC> tref_D(args.ptr_D, LayoutC::packed(tc_D));
return {
args.problem_size,
tref_A,
tref_B,
tref_C,
tref_D,
args.epilogue_params,
static_cast<cutlass::conv::SplitKMode>(args.split_k_mode)
};
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}_TemporaryArgs args, int *semaphore=nullptr) {
auto arguments = construct_arguments(args);
typename ${operation_name}${operation_suffix}::Params* params;
params = new ${operation_name}${operation_suffix}::Params(arguments, semaphore);
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}${operation_suffix}::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}${operation_suffix}::Params); i ++)
output[i] = bytes[i];
return output;
}
dim3 ${operation_name}_get_grid_shape(
int conv_kind,
cutlass::conv::Conv2dProblemSize problem_size,
cutlass::gemm::GemmCoord tile_size,
int split_k_slices
) {
using Swizzle = typename ${operation_name}_base::ThreadblockSwizzle;
auto tiled_shape = Swizzle::get_tiled_shape(
static_cast<cutlass::conv::Operator>(conv_kind),
problem_size,
tile_size,
split_k_slices);
return Swizzle::get_grid_shape(tiled_shape);
}
size_t ${operation_name}_get_workspace_size(${operation_name}_TemporaryArgs args) {
auto arguments = construct_arguments(args);
// Temporarily define device::-level Conv2d so that we can call get_workspace_size
using DeviceConv = cutlass::conv::device::ImplicitGemmConvolution<${operation_name}_base>;
return DeviceConv::get_workspace_size(arguments);
}
}
"""
def __init__(self, operation: "Conv2dOperation"):
super().__init__(operation)
self.extra_funcs = {
"get_grid_shape": dim3_,
"get_workspace_size": ctypes.c_uint64
}
self.argument_type, self.epilogue_type = get_conv2d_arguments(operation.epilogue_functor)
self.argtype = [ctypes.POINTER(self.argument_type), ctypes.c_void_p]
self.conv_kind = operation.conv_kind
self.operation: Conv2dOperation = operation
self.emitter = EmitConv2dInstance("_type")
self.threads = operation.tile_description.num_threads
self.swizzle_functor = operation.swizzling_functor
def emit(self):
return self.emitter.emit(self.operation)
def plan(self, arguments: Conv2dArguments):
tile_size = GemmCoord(
self.operation.tile_description.threadblock_shape[0],
self.operation.tile_description.threadblock_shape[1],
self.operation.tile_description.threadblock_shape[2],
)
grid = self.get_grid_shape(
int(self.conv_kind),
arguments.problem_size.ctype,
tile_size.ctype,
arguments.split_k_slices
)
return LaunchConfiguration(
[grid.x, grid.y, grid.z], [self.threads, 1, 1],
self.shared_memory_capacity)
def initialize(self):
err, = cuda.cuFuncSetAttribute(
self.kernel,
attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
value=self.shared_memory_capacity)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error: {err}")
class Conv2dOperation:
"""
CUTLASS Conv2d operation description.
:param conv_kind: convolution operator
:type conv_kind: :class:`cutlass_library.library.ConvKind`
:param iterator_algorithm: Selects among several implementation
variants trading off performance with simplicity
:type iterator_algorithm: :class:`cutlass_library.library.IteratorAlgorithm`
:param arch: GPU compute capability (sm_xx)
:type arch: int
:param tile_description: tile description
:type tile_description: :class:`cutlass.backend.TileDescription`
:param A: tensor A description
:type A: :class:`cutlass.backend.TensorDescription`
:param B: tensor B description
:type B: :class:`cutlass.backend.TensorDescription`
:param C: tensor C description
:type C: :class:`cutlass.backend.TensorDescription`
:param D: tensor D description
:type D: :class:`cutlass.backend.TensorDescription`
    :param element_epilogue: element type for computation in epilogue
    :type element_epilogue: cutlass_library.library.DataType
    :param stride_support: distinguish among partial specializations that
        accelerate certain problems where convolution stride is unit
    :type stride_support: :class:`cutlass_library.library.StrideSupport`
:param epilogue_functor: convolution epilogue functor
:type epilogue_functor: :class:`EpilogueFunctor`
:param swizzling_functor: threadblock swizzling functor
"""
def __init__(
self,
conv_kind,
iterator_algorithm,
arch: int,
tile_description: TileDescription,
A: TensorDescription,
B: TensorDescription,
C: TensorDescription,
stride_support,
epilogue_functor,
swizzling_functor=SwizzlingFunctor.Identity1,
emission_type=EmissionType.Kernel,
**kwargs
):
self.operation_kind: OperationKind = OperationKind.Conv2d
self.arch: int = arch
self.tile_description: TileDescription = tile_description
self.conv_kind = conv_kind
self.A: TensorDescription = A
self.B: TensorDescription = B
self.C: TensorDescription = C
self.epilogue_functor = epilogue_functor
self.iterator_algorithm = iterator_algorithm
self.stride_support = stride_support
self.swizzling_functor = swizzling_functor
self.emission_type = emission_type
self.rt_module: Conv2dRT = Conv2dRT(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
def run(self, arguments: Conv2dArguments) -> cuda.CUresult:
"""
Launch the cuda kernel with input arguments
:param arguments: conv2d arguments
:type arguments: :class:`cutlass.backend.Conv2dArguments`
"""
# launch the kernel
err = self.rt_module.run(
arguments.host_workspace,
arguments.device_workspace,
arguments.launch_config,
arguments.stream
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {err}")
return err
#
# Get function name
#
def procedural_name(self):
"""The full procedural name indicates architecture, extended name, tile size, and layout."""
return self.configuration_name()
def configuration_name(self):
"""The full procedural name indicates architecture, extended name, tile size, and layout."""
opcode_class_name = OpcodeClassNames[
self.tile_description.math_instruction.opcode_class
]
threadblock = "%dx%d_%dx%d" % (
self.tile_description.threadblock_shape[0],
self.tile_description.threadblock_shape[1],
self.tile_description.threadblock_shape[2],
self.tile_description.stages,
)
if self.stride_support == StrideSupport.Unity:
configuration_name = "cutlass_sm${arch}_${opcode_class}_${extended_name}_${threadblock}_${layout}_unity_stride_align${alignment}"
else:
configuration_name = "cutlass_sm${arch}_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}"
return SubstituteTemplate(
configuration_name,
{
"arch": str(self.arch),
"opcode_class": opcode_class_name,
"extended_name": self.extended_name(),
"threadblock": threadblock,
"layout": self.layout_name(),
"alignment": "%d" % self.A.alignment
},
)
def extended_name(self):
"""Append data types if they differ from compute type."""
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
"element_a": DataTypeNames[self.A.element],
"element_c": DataTypeNames[self.C.element],
"core_name": self.core_name(),
})
return extended_name
def layout_name(self):
return "%s" % (ShortLayoutTypeNames[self.A.layout])
def core_name(self):
"""The basic operation kind is prefixed with a letter indicating the accumulation type."""
intermediate_type = ""
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp:
inst_shape = "%dx%dx%d" % tuple(
self.tile_description.math_instruction.instruction_shape)
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.accumulator_type():
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
else:
inst_shape = ""
return "%s%s%s%s_%s" % (
ShortDataTypeNames[self.accumulator_type()],
inst_shape,
intermediate_type,
ConvKindNames[self.conv_kind],
IteratorAlgorithmNames[self.iterator_algorithm]
)
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
]
return self.tile_description.math_instruction.math_operation in complex_operators
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
def device_op(self):
"""
Returns a new Conv2dOperation object that is constructed with emission type
``EmissionType.Device``.
:return: operation ready for device-level code emission
:rtype: Conv2dOperation
"""
return Conv2dOperation(
self.conv_kind, self.iterator_algorithm, self.arch, self.tile_description,
self.A, self.B, self.C, self.stride_support, self.epilogue_functor, self.swizzling_functor,
emission_type=EmissionType.Device)
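# Usage sketch (illustrative, not part of the original module; variable names are
# hypothetical): given an operation `op` built for kernel-level emission, a
# device-level variant and its C++ instantiation can be obtained with
#   device_op = op.device_op()
#   device_code = EmitConv2dInstance().emit(device_op)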
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitConv2dInstance:
def __init__(self, operation_suffix=""):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/conv/kernel/default_conv2d_fprop.h",
"cutlass/conv/kernel/default_conv2d_dgrad.h",
"cutlass/conv/kernel/default_conv2d_wgrad.h",
"cutlass/conv/device/implicit_gemm_convolution.h"
]
self.template = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
struct ${operation_name}${operation_suffix}:
public ${operation_name}_base { };
"""
self.template_device = """
// Conv2d operation ${operation_name}
using Conv2d${conv_kind_name}Kernel = typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
using DeviceKernel =
typename cutlass::conv::device::ImplicitGemmConvolution<Conv2d${conv_kind_name}Kernel>;
"""
def emit(self, operation):
warp_shape = [int(operation.tile_description.threadblock_shape[idx] /
operation.tile_description.warp_count[idx]) for idx in range(3)]
epilogue_vector_length = int(min(
operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"conv_kind": ConvKindTag[operation.conv_kind],
"conv_kind_name": ConvKindNames[operation.conv_kind].capitalize(),
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[operation.A.layout],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[operation.B.layout],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[operation.C.layout],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]),
"instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]),
"instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]),
"epilogue_vector_length": str(epilogue_vector_length),
"epilogue_functor": operation.epilogue_functor.emit(),
"swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor],
"stages": str(operation.tile_description.stages),
"iterator_algorithm": IteratorAlgorithmTag[operation.iterator_algorithm],
"iterator_algorithm_name": IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(),
"stride_support": StrideSupportTag[operation.stride_support],
"math_operator": "cutlass::arch::OpMultiplyAddComplex" if operation.is_complex() else MathOperationTag[operation.tile_description.math_instruction.math_operation],
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
}
if operation.emission_type == EmissionType.Kernel:
conv2d_template = self.template
else:
conv2d_template = self.template_device
return SubstituteTemplate(conv2d_template, values)
|
cutlass/python/cutlass/backend/conv2d_operation.py/0
|
{
"file_path": "cutlass/python/cutlass/backend/conv2d_operation.py",
"repo_id": "cutlass",
"token_count": 10771
}
| 46 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Layout algebras
"""
from pycute import Layout, composition, make_layout, flatten, product
def _infer_split(old_shape, new_shape):
old_shape = _tuple_to_list(old_shape)
new_shape = _tuple_to_list(new_shape)
if len(old_shape) == 0 and len(new_shape) == 0:
return []
if len(old_shape) == 0:
if product(tuple(new_shape)) != 1:
raise ValueError("Invalid reshape size")
else:
return new_shape
if len(new_shape) == 0:
if product(tuple(old_shape)) != 1:
raise ValueError("Invalid reshape size")
else:
return old_shape
# This is done recursively, processing only the last dimension at each step
old_dim = old_shape[-1]
new_dim = new_shape[-1]
# Exact match
if old_dim == new_dim:
return _infer_split(old_shape[:-1], new_shape[:-1]) + [new_dim,]
# Needs split
if old_dim > new_dim and old_dim % new_dim == 0:
residual = old_dim // new_dim
return _infer_split(old_shape[:-1] + [residual,], new_shape[:-1]) + [new_dim,]
# Needs merge
if old_dim < new_dim and new_dim % old_dim == 0:
residual = new_dim // old_dim
return _infer_split(old_shape[:-1], new_shape[:-1] + [residual,]) + [old_dim,]
raise NotImplementedError(f"Unsupported split: {old_shape} -> {new_shape}")
def _infer_merge(flatten_shape, shape):
flatten_shape = _tuple_to_list(flatten_shape)
shape = _tuple_to_list(shape)
idx_flat = 0
merged_shape = []
for dim in shape:
# Exact match
if dim == flatten_shape[idx_flat]:
merged_shape.append(dim)
idx_flat += 1
# Need group
elif dim > flatten_shape[idx_flat] and dim % flatten_shape[idx_flat] == 0:
residual = dim
group = []
while(residual > 1):
group.append(flatten_shape[idx_flat])
residual = residual // flatten_shape[idx_flat]
idx_flat += 1
merged_shape.append(group)
else:
raise NotImplementedError(f"Unsupported merge: {flatten_shape} -> {shape}")
return merged_shape
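# Worked example (illustrative): grouping flat dimensions back onto a target shape,
#   _infer_merge([4, 3, 2], [4, 6])  ->  [4, [3, 2]]
# i.e. the flat dimensions 3 and 2 are grouped to form the merged dimension 6.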
def _list_to_tuple(nested_list):
if isinstance(nested_list, list) or isinstance(nested_list, tuple):
return tuple(_list_to_tuple(item) for item in nested_list)
return nested_list
def _tuple_to_list(nested_tuple):
if isinstance(nested_tuple, list) or isinstance(nested_tuple, tuple):
return list(_tuple_to_list(item) for item in nested_tuple)
return nested_tuple
def _reverse_tuple(nested_tuple: tuple):
if isinstance(nested_tuple, tuple):
return tuple([_reverse_tuple(item) for item in nested_tuple][::-1])
return nested_tuple
def _get_first_lhs_nonzero_stride(stride_list, idx):
    # Nearest nonzero stride to the left of `idx`, or None if every stride there is zero
    for i in reversed(range(idx)):
        if stride_list[i] != 0:
            return stride_list[i]
    return None
def _get_first_rhs_nonzero_stride(stride_list, idx):
    # Nearest nonzero stride to the right of `idx`, or None if every stride there is zero
    for i in range(idx + 1, len(stride_list)):
        if stride_list[i] != 0:
            return stride_list[i]
    return None
def reshape(layout, new_shape):
"""
General reshape of input layout.
It takes two steps:
1. split the dimensions of the old layout
2. merge the split dimensions according to the new shape
"""
#
# Step 1: Split the dimensions of the old layout
#
# 1.1 Flatten the old and new shapes
old_flatten_shape = list(flatten(layout.shape))
new_flatten_shape = list(flatten(new_shape))
# 1.2 Infer the flattened split shape
splitted_flatten_shape = _infer_split(old_flatten_shape, new_flatten_shape)
# 1.3 Unflatten the split shape based on the old shape
splited_shape = _infer_merge(splitted_flatten_shape, old_flatten_shape)
# 1.4 Infer the type of each split
# If the split type is row-major (R), the dimension list is reversed because
# cute::composition only supports column-major splits
split_type = [] # the type of each split (ColumnMajor or RowMajor)
permuted_splitted_shape = []
old_flatten_stride = list(flatten(layout.stride))
for idx, dim in enumerate(splited_shape):
if not isinstance(dim, list):
permuted_splitted_shape.append(dim)
split_type.append("C")
else:
lhs_stride = _get_first_lhs_nonzero_stride(old_flatten_stride, idx)
rhs_stride = _get_first_rhs_nonzero_stride(old_flatten_stride, idx)
# Special case for single tuple
# Use column-major by default
if lhs_stride is None and rhs_stride is None:
permuted_splitted_shape.append(dim)
split_type.append("C")
else:
if lhs_stride is not None and rhs_stride is not None:
# We consider shape[idx]:stride[idx]
# Case 1: stride[idx - 1] <= stride[idx] <= stride[idx + 1]: column major
if lhs_stride <= old_flatten_stride[idx] and old_flatten_stride[idx] <= rhs_stride:
permuted_splitted_shape.append(dim)
split_type.append("C")
# Case 2: stride[idx - 1] > stride[idx] > stride[idx + 1]: row major
elif lhs_stride > old_flatten_stride[idx] and old_flatten_stride[idx] > rhs_stride:
permuted_splitted_shape.append([d for d in reversed(dim)])
split_type.append("R")
# Case 3: stride[idx - 1] <= stride[idx] > stride[idx + 1]: concave
elif lhs_stride <= old_flatten_stride[idx] and old_flatten_stride[idx] > rhs_stride:
if lhs_stride >= rhs_stride:
permuted_splitted_shape.append(dim)
split_type.append("C")
else:
permuted_splitted_shape.append([d for d in reversed(dim)])
split_type.append("R")
# Case 4: stride[idx - 1] > stride[idx] <= stride[idx + 1]: concave
elif lhs_stride > old_flatten_stride[idx] and old_flatten_stride[idx] <= rhs_stride:
if lhs_stride >= rhs_stride:
permuted_splitted_shape.append(dim)
split_type.append("C")
else:
permuted_splitted_shape.append([d for d in reversed(dim)])
split_type.append("R")
else:
raise NotImplementedError()
elif lhs_stride is None:
# If this dim's stride exceeds the nearest nonzero stride to its right, expand in row-major order; otherwise in column-major order
if old_flatten_stride[idx] > rhs_stride:
permuted_splitted_shape.append([d for d in reversed(dim)])
split_type.append("R")
else:
permuted_splitted_shape.append(dim)
split_type.append("C")
else:
# If this dim's stride is smaller than the nearest nonzero stride to its left, expand in row-major order; otherwise in column-major order
if old_flatten_stride[idx] < lhs_stride:
permuted_splitted_shape.append([d for d in reversed(dim)])
split_type.append("R")
else:
permuted_splitted_shape.append(dim)
split_type.append("C")
# 1.5 Generate the split layout
permuted_splitted_layout = composition(layout, Layout(_list_to_tuple(permuted_splitted_shape)))
# 1.6 Reverse the permutation introduced in step 1.4 before merging
splitted_shape = []
splitted_stride = []
for shape_dim, stride_dim, type in zip(
permuted_splitted_layout.shape,
permuted_splitted_layout.stride,
split_type):
if type == "C":
splitted_shape.append(shape_dim)
splitted_stride.append(stride_dim)
else:
splitted_shape.append(tuple([d for d in reversed(shape_dim)]))
splitted_stride.append(tuple([d for d in reversed(stride_dim)]))
splitted_layout = Layout(tuple(splitted_shape), tuple(splitted_stride))
#
# Step 2: Merge the split dimensions according to the new shape
#
# 2.1 Merge layout
merged_layout = composition(splitted_layout, Layout(new_shape))
# 2.2 Cleaning up
output_layout = composition(merged_layout, Layout(new_shape))
return output_layout
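# Usage sketch (illustrative; strides assume pycute's default column-major Layout
# construction):
#   layout = Layout((4, 6))               # shape (4, 6), stride (1, 4)
#   reshaped = reshape(layout, (4, 3, 2))
#   # reshaped has shape (4, 3, 2) and addresses the same elements as `layout`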
def permutation(layout, permutation):
"""
Permute the layout
"""
new_shape = tuple([layout.shape[idx] for idx in permutation])
new_stride = tuple([layout.stride[idx] for idx in permutation])
return Layout(new_shape, new_stride)
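# Example (illustrative): swapping the two modes of a layout,
#   permutation(Layout((4, 6), (1, 4)), (1, 0))  ->  Layout((6, 4), (4, 1))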
def _broadcast(layout, new_shape):
if len(layout) == 1 and isinstance(new_shape, int):
old_dim = layout.shape
old_stride = layout.stride
new_dim = new_shape
if old_dim == new_dim:
return Layout(old_dim, old_stride)
elif old_dim == 1:
return Layout(new_dim, 0)
else:
raise NotImplementedError(f"Invalid Broadcast: {old_dim} -> {new_dim}")
# Align the dimensions
old_shape = layout.shape
if isinstance(old_shape, int):
old_shape = (old_shape,)
sub_layouts = [layout,]
else:
sub_layouts = [sub_layout for sub_layout in layout]
rhs_broadcast_layouts = [Layout(1, 0)] * (len(new_shape) - len(old_shape))
# Get the broadcasted layout
broadcast_layouts = []
try:
layout = make_layout(*sub_layouts, *rhs_broadcast_layouts)
broadcast_layouts = []
for idx, sub_layout in enumerate(layout):
broadcast_layouts.append(_broadcast(sub_layout, new_shape[idx]))
except NotImplementedError:
    # Retry with the broadcast modes prepended instead of appended,
    # discarding any partial results from the attempt above
    layout = make_layout(*rhs_broadcast_layouts, *sub_layouts)
    broadcast_layouts = []
    for idx, sub_layout in enumerate(layout):
        broadcast_layouts.append(_broadcast(sub_layout, new_shape[idx]))
return make_layout(*broadcast_layouts)
def broadcast(layout, new_shape):
"""
Broadcast the input layout to the new shape.
The broadcast result has shape equal to ``new_shape``, and
the strides of the broadcast dimensions are 0.
"""
return _broadcast(layout, new_shape)
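# Usage sketch (illustrative; exact strides assume pycute's Layout semantics):
#   broadcast(Layout((4, 6), (1, 4)), (2, 4, 6))
#   # -> a rank-3 layout of shape (2, 4, 6) whose new leading dimension has stride 0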
def debroadcast(layout, dims):
"""
Squeeze out the dimensions listed in ``dims``; each must have stride 0
"""
for dim in dims:
if layout.stride[dim] != 0:
raise ValueError(f"Dim{dim} cannot be debroadcasted as it has stride {layout.stride[dim]}")
new_shape = tuple([s for idx, s in enumerate(layout.shape) if idx not in dims])
new_stride = tuple([s for idx, s in enumerate(layout.stride) if idx not in dims])
return Layout(new_shape, new_stride)
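# Example (illustrative): removing a broadcast (stride-0) dimension,
#   debroadcast(Layout((3, 5), (0, 1)), dims=[0])  ->  Layout((5,), (1,))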
def canonicalization_(shapes, strides):
if isinstance(shapes, tuple):
c_shapes = []
c_strides = []
for shape, stride in zip(shapes, strides):
c_shape, c_stride = canonicalization_(shape, stride)
c_shapes.append(c_shape)
c_strides.append(c_stride)
return tuple(c_shapes), tuple(c_strides)
else:
if shapes == 1:
return 1, 0
else:
return shapes, strides
def canonicalization(layout):
"""
Canonicalize the input layout
1. set the stride of every dimension whose shape is 1 to 0
"""
new_shape, new_stride = canonicalization_(layout.shape, layout.stride)
return Layout(new_shape, new_stride)
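# Example (illustrative): size-1 modes get their stride zeroed,
#   canonicalization(Layout((4, 1), (1, 4)))  ->  Layout((4, 1), (1, 0))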
|
cutlass/python/cutlass/backend/evt/ir/layout_algorithm.py/0
|
{
"file_path": "cutlass/python/cutlass/backend/evt/ir/layout_algorithm.py",
"repo_id": "cutlass",
"token_count": 5880
}
| 47 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Registry of elementwise epilogues
Elementwise epilogues can be added to many CUTLASS kernels in the CUTLASS Python interface via
code like the following for GEMM:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
plan.activation = cutlass.epilogue.relu
"""
from cutlass.backend import epilogue
gelu = epilogue.gelu
hardswish = epilogue.hardswish
identity = epilogue.identity
leaky_relu = epilogue.leaky_relu
relu = epilogue.relu
sigmoid = epilogue.sigmoid
silu = epilogue.silu
tanh = epilogue.tanh
_activations = [gelu, hardswish, identity, leaky_relu, relu, sigmoid, silu, tanh]
def get_activations() -> list:
"""
Returns a list of available activation functions
:return: list of available activation functions
:rtype: list
"""
return _activations
def get_activation_epilogue(
activation,
element_output,
elements_per_access,
element_accumulator,
element_compute,
):
"""
Return an epilogue corresponding to the activation function, data types, and alignment
used in the kernel
:param activation: elementwise activation function to use
:param element_output: data type of the output
:param elements_per_access: alignment of operand C of the kernel
:type elements_per_access: int
:param element_accumulator: data type of the accumulated output C
:param element_compute: data type in which compute operations should be performed
:return: epilogue functor
"""
if activation not in _activations:
raise Exception(
f"Unsupported activation type {activation}. Available activations are: {_activations}"
)
if activation == identity:
return epilogue.LinearCombination(
element_output, elements_per_access, element_accumulator, element_compute
)
else:
return epilogue.LinearCombinationGeneric(
activation,
element_output,
elements_per_access,
element_accumulator,
element_compute,
)
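# Usage sketch (illustrative; the data types and alignment below are hypothetical
# choices, not values mandated by this module):
#   epilogue_fn = get_activation_epilogue(
#       relu,
#       cutlass.DataType.f16,   # element_output
#       8,                      # elements_per_access
#       cutlass.DataType.f32,   # element_accumulator
#       cutlass.DataType.f32,   # element_compute
#   )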
"""
Frontend for EVT that generates epilogue functor through tracing the input function
"""
from cutlass.backend.evt.frontend import PythonASTFrontend
def trace(fn, example_tensors, **kwargs):
"""
Trace `fn(**example_tensors)` and generate an epilogue visitor
:param fn: Python callable to trace
:param example_tensors: example inputs for fn
:type example_tensors: dict
.. highlight:: python
.. code-block:: python
import cutlass.backend.evt
# Define epilogue function as Python callable
def example_fn(accum, C, alpha, beta, gamma):
D = ((accum + C) * alpha - gamma) / beta
return D
# Define the example tensors
example_inputs = {
"accum": torch.empty(size=(6, 512, 512), dtype=torch.float16, device="cuda"),
"C": torch.empty(size=(6, 512, 512), dtype=torch.float16, device="cuda"),
"alpha": 1.5,
"beta": 0.5,
"gamma": 2.5,
"D": torch.empty(size=(6, 512, 512), dtype=torch.float16, device="cuda")
}
# Generate the epilogue functor
epilogue_visitor = cutlass.epilogue.trace(example_fn, example_inputs)
"""
if callable(fn):
class EpilogueFunctor(PythonASTFrontend):
def __init__(self, **kwargs):
super().__init__(**kwargs)
pass
setattr(EpilogueFunctor, "__call__", staticmethod(fn))
epilogue_functor = EpilogueFunctor(**kwargs)
epilogue_functor.trace(example_tensors)
return epilogue_functor
else:
raise NotImplementedError("Expect a callable Python function")
|
cutlass/python/cutlass/epilogue/epilogue.py/0
|
{
"file_path": "cutlass/python/cutlass/epilogue/epilogue.py",
"repo_id": "cutlass",
"token_count": 1965
}
| 48 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for emitting Conv3d kernels
"""
import enum
import logging
import os.path
import shutil
from string import Template
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
from cutlass_library.conv3x_emitter import EmitConv3xInstance, EmitConv3xIncludes
except ImportError:
from library import *
from conv3x_emitter import EmitConv3xInstance, EmitConv3xIncludes
_LOGGER = logging.getLogger(__name__)
###################################################################################################
#
class Conv3dOperation:
#
def __init__(self, conv_kind, iterator_algorithm, arch, tile_description, A, B, C, element_epilogue, \
stride_support, epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
self.operation_kind = OperationKind.Conv3d
self.arch = arch
self.tile_description = tile_description
self.conv_kind = conv_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.iterator_algorithm = iterator_algorithm
self.stride_support = stride_support
self.swizzling_functor = swizzling_functor
#
def is_mixed_input(self):
return self.A.element != self.B.element
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
intermediate_type = ''
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp:
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
else:
inst_shape = ''
return "%s%s%s%s3d_%s" % (ShortDataTypeNames[self.tile_description.math_instruction.element_accumulator], \
inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
threadblock = "%dx%d_%dx%d" % (
self.tile_description.threadblock_shape[0],
self.tile_description.threadblock_shape[1],
self.tile_description.threadblock_shape[2],
self.tile_description.stages
)
if self.stride_support == StrideSupport.Unity:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_unity_stride"
else:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}"
return SubstituteTemplate(
configuration_name,
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
}
)
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.configuration_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitConv3dInstance:
def __init__(self):
# Emitter for CUTLASS 3 convolution operations
self.conv3x_emitter = EmitConv3xInstance()
self.template = """
// Conv3d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv3d${conv_kind_name}<
${element_a},
cutlass::layout::TensorNDHWC,
${element_b},
cutlass::layout::TensorNDHWC,
${element_c},
cutlass::layout::TensorNDHWC,
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>,
${stages},
cutlass::arch::OpMultiplyAdd,
${iterator_algorithm},
${stride_support}
>::Kernel;
"""
def emit(self, operation):
_LOGGER.debug("*** EmitConv3dInstance::emit")
_LOGGER.debug("*** operation: procedural_name()=" + operation.procedural_name())
if hasattr(operation, 'is_3x') and operation.is_3x:
_LOGGER.debug("*** CUTLASS 3 operation")
return self.conv3x_emitter.emit(operation)
_LOGGER.debug("*** CUTLASS 2 operation")
warp_shape = [int(operation.tile_description.threadblock_shape[idx] / operation.tile_description.warp_count[idx]) for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'conv_kind': ConvKindTag[operation.conv_kind],
'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'iterator_algorithm': IteratorAlgorithmTag[operation.iterator_algorithm],
'iterator_algorithm_name': IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(),
'stride_support': StrideSupportTag[operation.stride_support]
}
return SubstituteTemplate(self.template, values)
###################################################################################################
#
# Generator functions for all layouts
#
###################################################################################################
#
def GenerateConv3dTensorOp(manifest, tile_descriptions, min_cc, align = 128):
for tile in tile_descriptions:
for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]:
if conv_kind == ConvKind.Fprop or (tile.math_instruction.element_accumulator in [DataType.f16, DataType.f32]):
#
output_types = [tile.math_instruction.element_a, tile.math_instruction.element_accumulator] \
if DataTypeSize[tile.math_instruction.element_accumulator] == 32 \
else [tile.math_instruction.element_accumulator,]
for output_type in output_types:
A = TensorDescription(tile.math_instruction.element_a, LayoutType.TensorNDHWC, int(align / DataTypeSize[tile.math_instruction.element_a]))
B = TensorDescription(tile.math_instruction.element_b, LayoutType.TensorNDHWC, int(align / DataTypeSize[tile.math_instruction.element_b]))
C = TensorDescription(output_type, LayoutType.TensorNDHWC, max(1, int(align / DataTypeSize[output_type])))
manifest.append(Conv3dOperation(conv_kind, min_cc, tile, A, B, C, tile.math_instruction.element_accumulator))
class EmitConv3dIncludes:
'''Emit includes that are specific to the operation.'''
def __init__(self):
self.includes = ['conv3d_operation.h']
self.emitter_3x = EmitConv3xIncludes()
def operation_is_3x(self, operation) -> bool:
"""Whether operation is a CUTLASS 3 convolution (as opposed to CUTLASS 2)"""
return hasattr(operation, 'is_3x') and operation.is_3x
def emit(self, operation) -> str:
if self.operation_is_3x(operation):
return self.emitter_3x.emit(operation)
return '\n'.join(f"#include \"{incl}\"" for incl in self.includes) + \
"\n\n///////////////////////////////////////////////////////////////////////////////////////////////////"
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitConv3dConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name)
self.instance_emitter = EmitConv3dInstance()
self.includes_emitter = EmitConv3dIncludes()
self.header_template = """
/*
Generated by conv3d_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
"""
self.instance_template = """
${stub_begin}
${operation_instance}
// Derived class
struct ${operation_name} :
public ${operation_name}_base { };
${stub_end}
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.configuration_header = """
namespace cutlass {
namespace library {
// Initialize all instances
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.configuration_instance = """${stub_begin}
using Operation_${operation_name} = cutlass::conv::device::${kernel_name}<
${operation_name}>;
manifest.append(new cutlass::library::${operation_wrapper}<
Operation_${operation_name}
>(
"${operation_name}"
));
${stub_end}
"""
self.configuration_epilogue = "}\n"
self.epilogue_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def operation_is_3x(self, operation):
"""Whether operation is a CUTLASS 3 convolution (as opposed to CUTLASS 2)"""
return hasattr(operation, 'is_3x') and operation.is_3x
def __enter__(self):
"""
Open the configuration_file, and write the "header" C++ code to it.
The "header" consists of a comment (that this is generated code,
so it should not be edited), and includes that are common
to both the CUTLASS 2 and the CUTLASS 3 cases.
"""
_LOGGER.debug('*** EmitConv3dConfigurationLibrary::__enter__')
_LOGGER.debug('*** configuration_path (file to write): ' +
str(self.configuration_path))
_LOGGER.debug('*** configuration_name: ' + self.configuration_name)
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(SubstituteTemplate(self.header_template, {
'configuration_name': self.configuration_name
}))
self.operations = []
return self
def emit(self, operation):
"""
Write three pieces of C++ code to the configuration_file
(that was opened by the __enter__ method above):
1. the header includes that are specific to the operation
(CUTLASS 2 vs. CUTLASS 3);
2. the "operation instance" (a "using" declaration ending in "_base"); and
3. the "operation name" (declaration and definition of a derived class
of the above operation instance).
The "using" declaration turns a C++ class name, possibly namespace-qualified,
possibly also with angle brackets, into a C-style, easily demangled identifier.
"""
_LOGGER.debug('*** EmitConv3dConfigurationLibrary::emit')
_LOGGER.debug('*** operation.procedural_name(): ' + operation.procedural_name())
self.operations.append(operation)
self.configuration_file.write(self.includes_emitter.emit(operation))
stub_begin = ''
stub_end = ''
# It can be useful to stub (comment) out instantiations for testing.
# In this case, one need only set is_stub to True.
is_stub = False
if is_stub:
stub_begin = "// STUB for now\n#if 0"
stub_end = '#endif // 0'
self.configuration_file.write(Template(self.instance_template).substitute({
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'operation_instance': self.instance_emitter.emit(operation),
'stub_begin': stub_begin,
'stub_end': stub_end
}))
def __exit__(self, exception_type, exception_value, traceback):
"""
Write the rest of the C++ code to the configuration_file, and close the file.
The "rest of the C++ code" has the following components.
1. Configuration header: Open the namespace(s), and open the definition
of the "initialize_${configuration_name}" registration function
that registers the operation with the Manifest.
("Registration" helps turn C++ compile-time polymorphism
(via template parameters) into a run-time choice of parameters.)
2. Configuration instance: In the body of the registration function,
make a "using" declaration Operation_${operation_name} for the
operation type (which uses operation_name as its template argument).
Then, tell the manifest about the operation via a "manifest.append" call.
The argument of the call is a new instance of
"SomethingOperation<Operation_${operation_name}>"
(replace Something with a specific name).
3. Configuration epilogue: Close the definition of the registration function.
4. Epilogue template: Close the namespace(s).
"""
_LOGGER.debug('*** EmitConv3dConfigurationLibrary::__exit__')
_LOGGER.debug('*** configuration_path (file to write): ' +
str(self.configuration_path))
_LOGGER.debug('*** configuration_name: ' + self.configuration_name)
self.configuration_file.write(SubstituteTemplate(self.configuration_header, {
'configuration_name': self.configuration_name
}))
for operation in self.operations:
stub_begin = ''
stub_end = ''
# It can be useful to stub (comment) out instantiations for testing.
# In this case, one need only set is_stub to True.
is_stub = False
if is_stub:
stub_begin = "// STUB for now\n#if 0"
stub_end = "#endif // 0"
kernel_name = 'ImplicitGemmConvolution'
operation_wrapper = 'Conv3dOperation'
if self.operation_is_3x(operation):
kernel_name = 'ConvUniversalAdapter'
operation_wrapper = 'ConvOperation3x'
self.configuration_file.write(SubstituteTemplate(self.configuration_instance, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'kernel_name': kernel_name,
'operation_wrapper': operation_wrapper,
'stub_begin': stub_begin,
'stub_end': stub_end
}))
self.configuration_file.write(self.configuration_epilogue)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
###################################################################################################
|
cutlass/python/cutlass_library/conv3d_operation.py/0
|
{
"file_path": "cutlass/python/cutlass_library/conv3d_operation.py",
"repo_id": "cutlass",
"token_count": 6540
}
| 49 |
Emitters
========
Common
------
.. automodule:: cutlass.emit.common
:members:
:undoc-members:
:show-inheritance:
PyTorch
-------
.. automodule:: cutlass.emit.pytorch
:members:
:undoc-members:
:show-inheritance:
|
cutlass/python/docs_src/source/cutlass.emit.rst/0
|
{
"file_path": "cutlass/python/docs_src/source/cutlass.emit.rst",
"repo_id": "cutlass",
"token_count": 98
}
| 50 |
################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Testbed classes of EVT
"""
import torch
import unittest
import cutlass
from cutlass import Tensor
import cutlass.backend.evt
from cutlass.shape import GemmCoord
from cutlass.utils.datatypes import torch_type
from cutlass.utils.profiler import CUDAEventProfiler
class EVTReferenceModule:
def __init__(self, layout_A, layout_B, layout_C, epilogue_visitor):
self.layout_A = layout_A
self.layout_B = layout_B
self.layout_C = layout_C
self.epilogue_visitor = epilogue_visitor
def run(self, A, B, C, problem_size, alpha, beta, batch=1):
if self.layout_A == cutlass.LayoutType.RowMajor:
A_row = A.view((batch, problem_size.m, problem_size.k))
else:
A_col = A.view((batch, problem_size.k, problem_size.m))
A_row = torch.permute(A_col, (0, 2, 1))
if self.layout_B == cutlass.LayoutType.RowMajor:
B_row = B.view((batch, problem_size.k, problem_size.n))
else:
B_col = B.view((batch, problem_size.n, problem_size.k))
B_row = torch.permute(B_col, (0, 2, 1))
if self.layout_C == cutlass.LayoutType.RowMajor:
C_row = C.view((batch, problem_size.m, problem_size.n))
else:
C_col = C.view((batch, problem_size.n, problem_size.m))
C_row = torch.permute(C_col, (0, 2, 1))
out_row = torch.matmul(A_row, B_row) * alpha + C_row * beta
if self.layout_C == cutlass.LayoutType.ColumnMajor:
out = torch.permute(out_row, (0, 2, 1))
else:
out = out_row
return torch.flatten(out)
def __call__(self, A, B, C, problem_size, batch=1, epilogue_args=None):
# Running the mainloop
accum = self.run(
A, B, C, problem_size, 1.0, 0.0, batch=batch
).reshape(batch, problem_size.m, problem_size.n)
# Running the epilogue
epilogue_args["accum"] = accum
references = self.epilogue_visitor(**epilogue_args)
# Return the results
if not isinstance(references, tuple):
references = (references,)
return references
class EVTTestBed:
"""
Epilogue Visitor Testbed
"""
def __init__(self, element, evt_fn, example_inputs, profile=False, **kwargs) -> None:
self.element = element
layout = cutlass.LayoutType.RowMajor
self.example_inputs = example_inputs
# Create the Gemm plan
self.plan = cutlass.op.Gemm(element=element, layout=layout, element_accumulator=torch.float32)
if "tile_description" in kwargs:
self.plan.tile_description = kwargs["tile_description"]
if "swizzling_functor" in kwargs:
self.plan.swizzling_functor = kwargs["swizzling_functor"]
# Compile the epilogue visitor
epilogue_visitor = cutlass.epilogue.trace(evt_fn, example_inputs)
if "epilogue_stages" in kwargs:
epilogue_visitor.epilogue_stages = kwargs["epilogue_stages"]
self.plan.epilogue_visitor = epilogue_visitor
# Reference model
self.reference_fn = EVTReferenceModule(layout, layout, layout, epilogue_visitor)
self.profile = profile
def get_torch_tensor(self, shape, dtype=None, fill=None):
if dtype is None:
dtype = self.element
dtype = torch_type(dtype)
if fill is None:
return torch.ceil(
torch.empty(size=shape, dtype=dtype, device="cuda").uniform_(-4.5, 3.5)
)
else:
return torch.full(shape, fill, dtype=dtype, device="cuda")
def verify(self, problem_size, input_keys, result_keys, batch_count=1):
"""
Verify the results
"""
problem_size = GemmCoord(*problem_size)
# Initiate the GEMM arguments
tensor_A = self.get_torch_tensor((batch_count, problem_size.m, problem_size.k))
tensor_B = self.get_torch_tensor((batch_count, problem_size.k, problem_size.n))
# Initialize the epilogue args
epilogue_args = {}
for key in self.example_inputs.keys():
if key in input_keys:
tensor = self.example_inputs[key]
if isinstance(tensor, Tensor):
epilogue_args[key] = self.get_torch_tensor(tensor.shape, tensor.element)
else:
epilogue_args[key] = tensor
elif key in result_keys:
tensor = self.example_inputs[key]
if isinstance(tensor, Tensor):
if "max" in key:
fill = -1000
else:
fill = 0
epilogue_args[key] = self.get_torch_tensor(tensor.shape, tensor.element, fill=fill)
else:
epilogue_args[key] = tensor
tensor_D = epilogue_args["D"]
if "C" in epilogue_args:
tensor_C = epilogue_args["C"]
else:
tensor_C = tensor_D
# Run the device kernel
self.plan.run(tensor_A, tensor_B, tensor_C, tensor_D, visitor_args=epilogue_args)
# Run the host reference
evt_args_inputs = {}
for key in input_keys:
evt_args_inputs[key] = epilogue_args[key]
reference_results = self.reference_fn(
tensor_A, tensor_B, tensor_C, problem_size, batch_count, evt_args_inputs)
# Compare the results
for result, ref in zip(result_keys, reference_results):
assert torch.equal(epilogue_args[result].flatten(), ref.flatten())
# Run profile
if self.profile:
profiler = CUDAEventProfiler(
self.plan, 100, 100, tensor_A, tensor_B, tensor_C, tensor_D,
visitor_args = epilogue_args
)
print(f"Cutlass Python Duration: {profiler()}")
class EVTTestCaseBase(unittest.TestCase):
"""
Base class for EVT Unittest
"""
def __init__(self, methodName: str = "runTest", lmnk=(6, 512, 256, 128)) -> None:
super().__init__(methodName)
self.element = cutlass.DataType.f16
self.l, self.m, self.n, self.k = lmnk
self.problem_size = (self.m, self.n, self.k)
torch.random.manual_seed(42)
def fake_tensor(self, element, shape):
return Tensor(element=element, shape=shape, layout_tag=cutlass.LayoutType.RowMajor)
def get_problem_sizes(self, alignment, k=None, batch_count=[3,]):
k = k if k else self.k
problem_size_m = [alignment, 512 - 3 * alignment]
problem_size_n = [alignment, 512 - alignment]
if alignment % 8 == 0:
problem_size_m.append(768)
problem_size_n.append(768)
problem_size_l = batch_count
problem_sizes = []
for m in problem_size_m:
for n in problem_size_n:
for l in problem_size_l:
problem_sizes.append((m, n, k, l))
return problem_sizes
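# Example (illustrative): with alignment=8 and the defaults for k and batch_count,
# get_problem_sizes(8) yields every (m, n, k, l) combination with
# m in [8, 488, 768], n in [8, 504, 768], k = self.k, and l = 3.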
|
cutlass/test/python/cutlass/evt/utils/evt_testbed.py/0
|
{
"file_path": "cutlass/test/python/cutlass/evt/utils/evt_testbed.py",
"repo_id": "cutlass",
"token_count": 4021
}
| 51 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Test the EVT interface
"""
import numpy as np
import unittest
import cutlass
from cutlass import LayoutType, Tensor
from cutlass.backend.utils.device import device_cc
from cutlass.epilogue import reshape, permute
from utils import ExpectException
@unittest.skipIf(device_cc() not in [80, 90], "This unittest is for Sm80 and Sm90 only")
class EVTErrorTests(unittest.TestCase):
"""
Tests various error scenarios that arise with the EVT interface
"""
@unittest.skipIf(device_cc() != 90, "Only Sm90 EVT requires root node be 'D'")
def test_root_not_d(self):
"""
Test when "D" does not exist in Sm90 EVT
"""
def evt_root_not_d(accum, alpha):
F = accum * alpha
return F
example_tensors = {
"accum": self.fake_tensor(np.float16, (6, 512, 512)),
"alpha": 1.2,
"F": self.fake_tensor(np.float16, (6, 512, 512))
}
with ExpectException(device_cc() == 90,
"SyntaxError: Sm90 EVT requires the epilogue to have a returned tensor D, "
"but the variable 'D' is not found in the return values.", True):
cutlass.epilogue.trace(evt_root_not_d, example_tensors)
def test_no_accum(self):
"""
Test when "accum" is not in input arguments
"""
def evt_no_accum(alpha, C):
D = alpha * C
return D
example_tensors = {
"C": self.fake_tensor(np.float16, (6, 512, 512)),
"alpha": 1.2,
"D": self.fake_tensor(np.float16, (6, 512, 512))
}
with ExpectException(True, "SyntaxError: Cannot find 'accum' in the argument list.", True):
cutlass.epilogue.trace(evt_no_accum, example_tensors)
@unittest.skipIf(device_cc() != 90, "Only Sm90 EVT has concern on smem size")
def test_too_much_shared_memory(self):
"""
Test when the epilogue consumes too much shared memory
"""
def evt_too_much_shared_memory(accum, C1, C2, C3, C4, C5, C6, C7, C8):
D1 = accum + C1
D2 = D1 + C2
D3 = D2 + C3
D4 = D3 + C4
D5 = D4 + C5
D6 = D5 + C6
D7 = D6 + C7
D = D7 + C8
return D, D1, D2, D3, D4, D5, D6, D7
example_tensors = {
"accum": self.fake_tensor(np.float16, (6, 512, 512)),
"C1": self.fake_tensor(np.float16, (6, 512, 512)),
"C2": self.fake_tensor(np.float16, (6, 512, 512)),
"C3": self.fake_tensor(np.float16, (6, 512, 512)),
"C4": self.fake_tensor(np.float16, (6, 512, 512)),
"C5": self.fake_tensor(np.float16, (6, 512, 512)),
"C6": self.fake_tensor(np.float16, (6, 512, 512)),
"C7": self.fake_tensor(np.float16, (6, 512, 512)),
"C8": self.fake_tensor(np.float16, (6, 512, 512)),
"D1": self.fake_tensor(np.float16, (6, 512, 512)),
"D2": self.fake_tensor(np.float16, (6, 512, 512)),
"D3": self.fake_tensor(np.float16, (6, 512, 512)),
"D4": self.fake_tensor(np.float16, (6, 512, 512)),
"D5": self.fake_tensor(np.float16, (6, 512, 512)),
"D6": self.fake_tensor(np.float16, (6, 512, 512)),
"D7": self.fake_tensor(np.float16, (6, 512, 512)),
"D": self.fake_tensor(np.float16, (6, 512, 512))
}
epilogue_visitor = cutlass.epilogue.trace(evt_too_much_shared_memory, example_tensors)
plan = cutlass.op.Gemm(
element=np.float16, layout=cutlass.LayoutType.RowMajor,
element_accumulator=np.float32
)
with ExpectException(True,
"RuntimeError: The epilogue consumes too much shared memory. "
"No valid tile description is found in the generator.", True):
plan.epilogue_visitor = epilogue_visitor
def test_not_ssa(self):
"""
Test when the epilogue is not in SSA
"""
def evt_redefine(accum, C, alpha):
F = accum + C
F = F * alpha
D = F
return D, F
example_tensors = {
"accum": self.fake_tensor(np.float16, (6, 512, 512)),
"C": self.fake_tensor(np.float16, (6, 512, 512)),
"alpha": 1.5,
"D": self.fake_tensor(np.float16, (6, 512, 512)),
"F": self.fake_tensor(np.float16, (6, 512, 512))
}
with ExpectException(True, "SyntaxError: Variable 'F' cannot be defined twice.", True):
cutlass.epilogue.trace(evt_redefine, example_tensors)
def evt_undefine(accum, alpha):
F = accum + C
D = F * alpha
return D, F
example_tensors = {
"accum": self.fake_tensor(np.float16, (6, 512, 512)),
"alpha": 1.5,
"D": self.fake_tensor(np.float16, (6, 512, 512)),
"F": self.fake_tensor(np.float16, (6, 512, 512))
}
with ExpectException(True, "SyntaxError: Variable 'C' is undefined.", True):
cutlass.epilogue.trace(evt_undefine, example_tensors)
def test_missing_example_tensor(self):
"""
Test when the example tensor of an input/output variable is not provided
"""
def evt_missing_example_tensor(accum, C):
D = accum + C
return D
example_tensors = {
"accum": self.fake_tensor(np.float16, (6, 512, 512)),
"C": self.fake_tensor(np.float16, (6, 512, 512)),
}
with ExpectException(True, "RuntimeError: Example input for D is not provided.", True):
cutlass.epilogue.trace(evt_missing_example_tensor, example_tensors)
example_tensors = {
"accum": self.fake_tensor(np.float16, (6, 512, 512)),
"D": self.fake_tensor(np.float16, (6, 512, 512)),
}
with ExpectException(True, "RuntimeError: Example input for C is not provided.", True):
cutlass.epilogue.trace(evt_missing_example_tensor, example_tensors)
def test_return_expression(self):
"""
Test when the return value is an expression
"""
def evt_return_expr(accum, C):
return accum + C
example_tensors = {
"accum": self.fake_tensor(np.float16, (6, 512, 512)),
"C": self.fake_tensor(np.float16, (6, 512, 512)),
}
with ExpectException(True, "SyntaxError: Return value cannot be an expression", True):
cutlass.epilogue.trace(evt_return_expr, example_tensors)
def test_incompatible_shape(self):
"""
Test when the shape of example tensors are incompatible
"""
def evt_incompatible_shape(accum, C):
D = accum + C
return D
example_tensors = {
"accum": self.fake_tensor(np.float16, (6, 256, 512)),
"C": self.fake_tensor(np.float16, (6, 512, 512)),
"D": self.fake_tensor(np.float16, (6, 512, 512))
}
with ExpectException(True,
"RuntimeError: Dimension mismatch between accum(6, 256, 512), C(6, 512, 512).", True):
cutlass.epilogue.trace(evt_incompatible_shape, example_tensors)
def test_no_matching_impl(self):
def evt_no_matching_impl(accum, bias):
D = accum + reshape(permute(bias, indices=(1, 0)), new_shape=(512, 1))
return D
example_tensors = {
"accum": self.fake_tensor(np.float16, (6, 512, 256)),
"bias": self.fake_tensor(np.float16, (16, 32)),
"D": self.fake_tensor(np.float16, (6, 512, 256))
}
with ExpectException(True, "NotImplementedError: No matching op for node bias with stride (0, (1, 32), 0).", True):
cutlass.epilogue.trace(evt_no_matching_impl, example_tensors)
#
# Helper functions
#
def fake_tensor(self, element, shape):
return Tensor(element=element, shape=shape, layout_tag=LayoutType.RowMajor)
if __name__ == '__main__':
unittest.main()
|
cutlass/test/python/cutlass/interface/evt_interface.py/0
|
{
"file_path": "cutlass/test/python/cutlass/interface/evt_interface.py",
"repo_id": "cutlass",
"token_count": 4683
}
| 52 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS 3.x Implicit GEMM testbed sizes for ConvNd problem
*/
#pragma once
#include "cutlass/conv/convnd_problem_shape.hpp"
#include <vector>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test::conv::device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template<int SpatialDim, cutlass::conv::Operator ConvOp>
std::vector<cutlass::conv::ConvProblemShape<ConvOp, SpatialDim>>
inline
get_conv_problem_vector();
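// Illustrative usage sketch (hypothetical, not part of this header beyond the declaration
// above): a testbed typically fetches the problem vector for a given spatial dimension and
// convolution operator and iterates over it, e.g.
//
//   auto problem_shapes = get_conv_problem_vector<2, cutlass::conv::Operator::kFprop>();
//   for (auto const& problem_shape : problem_shapes) {
//     // ... construct, run, and verify one conv instance per problem shape ...
//   }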
/////////////////////////////////////////////////////////////////////////////////////////////////
// Fprop
/////////////////////////////////////////////////////////////////////////////////////////////////
// Specialization for 1D fprop problems
template<>
std::vector<cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kFprop, 1>> inline
get_conv_problem_vector<1, cutlass::conv::Operator::kFprop>() {
using ProblemShape = cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kFprop, 1>;
std::vector<ProblemShape> problem_shapes;
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 64}, // nwc
{64, 1, 64}, // ksc
{0}, // padding lower (pad_w)
{0}, // padding upper (pad_w)
{1}, // stride (stride_w)
{1}, // dilation (dilation_w)
1 // group
});
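// Note: the entry above passes shapes only and relies on packed (default) strides;
// the next two entries additionally pass explicit stride tuples to exercise
// non-packed input and output layouts, as their comments indicate.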
// non-packed input strides.
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 64}, // nwc
{800, 80, 1}, // stride (nwc)
{64, 1, 64}, // ksc
{64, 64, 1}, // stride (ksc)
{0}, // padding lower (pad_w)
{0}, // padding upper (pad_w)
{1}, // stride (stride_w)
{1}, // dilation (dilation_w)
1 // group
});
// non-packed output strides.
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 64}, // nwc
{512, 64, 1}, // stride (nwc)
{64, 1, 64}, // ksc
{64, 64, 1}, // stride (ksc)
{800, 80, 1}, // stride (nqk)
{0}, // padding lower (pad_w)
{0}, // padding upper (pad_w)
{1}, // stride (stride_w)
{1}, // dilation (dilation_w)
1 // group
});
// Filter-K = 16 for predication
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 64},
{16,1, 64},
{0},
{0},
{1},
{1},
1
});
// N = 2 and K = 96 for a larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 64},
{96, 1, 64},
{0},
{0},
{1},
{1},
1
});
// N = 7 and K = 256 for an even larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{7, 8, 64},
{256, 1, 64},
{0},
{0},
{1},
{1},
1
});
// 3 filter, no padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 64},
{256, 3, 64},
{0},
{0},
{1},
{1},
1
});
// 3 filter, symmetric padding with c % cta_k !=0
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 32},
{256, 3, 32},
{1},
{1},
{1},
{1},
1
});
// 4 filter, asymmetric padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 64},
{256, 4, 64},
{0},
{1},
{1},
{1},
1
});
// 3 filter, asymmetric padding and tstride of 2
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 64},
{256, 3, 64},
{0},
{1},
{2},
{1},
1
});
// 3 filter, asymmetric padding and dilation of 2
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 64},
{256, 3, 64},
{0},
{1},
{1},
{2},
1
});
return problem_shapes;
}
// Specialization for 2D fprop problems
template<>
std::vector<cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kFprop, 2>> inline
get_conv_problem_vector<2, cutlass::conv::Operator::kFprop>() {
using ProblemShape = cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kFprop, 2>;
std::vector<ProblemShape> problem_shapes;
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 8, 64}, // nhwc
{64, 1, 1, 64}, // krsc
{0, 0}, // padding lower (pad_h, pad_w)
{0, 0}, // padding upper (pad_h, pad_w)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
1 // group
});
// non-packed input strides.
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 8, 64}, // nhwc
{8000, 800, 80, 1}, // stride (nhwc)
{64, 1, 1, 64}, // krsc
{64, 64, 64, 1}, // stride (krsc)
{0, 0}, // padding lower (pad_h, pad_w)
{0, 0}, // padding upper (pad_h, pad_w)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
1 // group
});
// non-packed output strides.
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 8, 64}, // nhwc
{4096, 512, 64, 1}, // stride (nhwc)
{64, 1, 1, 64}, // krsc
{64, 64, 64, 1}, // stride (krsc)
{8000, 800, 80, 1}, // stride (npqk)
{0, 0}, // padding lower (pad_h, pad_w)
{0, 0}, // padding upper (pad_h, pad_w)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
1 // group
});
// Filter-K = 16 for predication
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 8, 64},
{16, 1, 1, 64},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// N = 2 and K = 96 for a larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 64},
{96, 1, 1, 64},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// N = 7 and K = 256 for an even larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{7, 8, 8, 64},
{256, 1, 1, 64},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// 3x3 filter, no padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 64},
{256, 3, 3, 64},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// 3x3 filter, symmetric padding with c % cta_k !=0
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 32},
{256, 3, 3, 32},
{1, 1},
{1, 1},
{1, 1},
{1, 1},
1
});
// 2x5 filter, asymmetric padding 1,2/1,2
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 64},
{256, 2, 5, 64},
{1, 1},
{2, 2},
{1, 1},
{1, 1},
1
});
// 2x5 filter, asymmetric padding 1,0/1,0, w/ stride
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 64},
{256, 2, 5, 64},
{1, 1},
{0, 0},
{2, 3},
{1, 1},
1
});
// 2x5 filter, asymmetric padding 1,0/1,0, w/ dilation
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 16, 64},
{256, 2, 5, 64},
{1, 1},
{0, 0},
{1, 1},
{2, 3},
1
});
// 2x5 filter, asymmetric padding 1,0/1,0, w/ stride, w/ dilation
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 16, 64},
{256, 2, 5, 64},
{1, 1},
{0, 0},
{2, 3},
{2, 3},
1
});
return problem_shapes;
}
// Specialization for 3D fprop problems
template<>
std::vector<cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kFprop, 3>> inline
get_conv_problem_vector<3, cutlass::conv::Operator::kFprop>() {
using ProblemShape = cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kFprop, 3>;
std::vector<ProblemShape> problem_shapes;
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 1, 8, 8, 64}, // ndhwc
{64, 1, 1, 1, 64}, // ktrsc
{0, 0, 0}, // padding lower (pad_d, pad_h, pad_w)
{0, 0, 0}, // padding upper (pad_d, pad_h, pad_w)
{1, 1, 1}, // stride (stride_d, stride_h, stride_w)
{1, 1, 1}, // dilation (dilation_d, dilation_h, dilation_w)
1 // group
});
// non-packed input output strides.
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 1, 8, 8, 64}, // ndhwc
{8000, 8000, 800, 80, 1}, // stride (ndhwc)
{64, 1, 1, 1, 64}, // ktrsc
{64, 64, 64, 64, 1}, // stride (ktrsc)
{8000, 8000, 800, 80, 1}, // stride (nzpqk)
{0, 0, 0}, // padding lower (pad_d, pad_h, pad_w)
{0, 0, 0}, // padding upper (pad_d, pad_h, pad_w)
{1, 1, 1}, // stride (stride_d, stride_h, stride_w)
{1, 1, 1}, // dilation (dilation_d, dilation_h, dilation_w)
1 // group
});
// Filter-K = 16 for predication
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 1, 8, 8, 64},
{16, 1, 1, 1, 64},
{0, 0, 0},
{0, 0, 0},
{1, 1, 1},
{1, 1, 1},
1
});
// N = 2 and K = 96 for a larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 1, 8, 8, 64},
{96, 1, 1, 1, 64},
{0, 0, 0},
{0, 0, 0},
{1, 1, 1},
{1, 1, 1},
1
});
// Filter 3x3x3 + no padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 3, 5, 8, 64},
{96, 3, 3, 3, 64},
{0, 0, 0},
{0, 0, 0},
{1, 1, 1},
{1, 1, 1},
1
});
// Filter 3x3x3 + symmetric padding with c % cta_k !=0
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 3, 5, 8, 32},
{96, 3, 3, 3, 32},
{1, 1, 1},
{1, 1, 1},
{1, 1, 1},
{1, 1, 1},
1
});
// Filter 3x4x5 + symmetric padding 111
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 3, 5, 8, 64},
{96, 3, 4, 5, 64},
{1, 1, 1},
{1, 1, 1},
{1, 1, 1},
{1, 1, 1},
1
});
// Filter 3x4x5 + asymmetric padding 102/010
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 3, 5, 8, 64},
{96, 3, 4, 5, 64},
{1, 0, 1},
{0, 2, 0},
{1, 1, 1},
{1, 1, 1},
1
});
// Filter 3x4x5 + asymmetric padding 102/010, w/ stride
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 10, 16, 64},
{96, 3, 4, 5, 64},
{1, 0, 1},
{0, 2, 0},
{2, 2, 3},
{1, 1, 1},
1
});
// Filter 3x4x5 + asymmetric padding 102/010, w/ dilation
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 10, 16, 64},
{96, 3, 4, 5, 64},
{1, 0, 1},
{0, 2, 0},
{1, 1, 1},
{2, 2, 3},
1
});
// Filter 3x4x5 + asymmetric padding 102/010, w/ stride, w/ dilation
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 10, 16, 64},
{96, 3, 4, 5, 64},
{1, 0, 1},
{0, 2, 0},
{2, 2, 3},
{2, 2, 3},
1
});
return problem_shapes;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Wgrad
/////////////////////////////////////////////////////////////////////////////////////////////////
// Specialization for 1D wgrad problems
template<>
std::vector<cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kWgrad, 1>> inline
get_conv_problem_vector<1, cutlass::conv::Operator::kWgrad>() {
using ProblemShape = cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kWgrad, 1>;
std::vector<ProblemShape> problem_shapes;
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 64}, // nwc
{64, 1, 64}, // ksc
{0}, // padding lower (pad_w)
{0}, // padding upper (pad_w)
{1}, // stride (stride_w)
{1}, // dilation (dilation_w)
1 // group
});
// Filter-K = 16 for predication
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 64},
{16,1, 64},
{0},
{0},
{1},
{1},
1
});
// N = 2 and K = 96 for a larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 64},
{96, 1, 64},
{0},
{0},
{1},
{1},
1
});
// N = 7 and K = 256 for an even larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{7, 8, 64},
{256, 1, 64},
{0},
{0},
{1},
{1},
1
});
// 3 filter, no padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 32},
{256, 3, 32},
{0},
{0},
{1},
{1},
1
});
// 3 filter, symmetric padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 32},
{256, 3, 32},
{1},
{1},
{1},
{1},
1
});
// 4 filter, asymmetric padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 32},
{256, 4, 32},
{0},
{1},
{1},
{1},
1
});
// 3 filter, asymmetric padding and tstride of 2
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 32},
{256, 3, 32},
{0},
{1},
{2},
{1},
1
});
// 3 filter, asymmetric padding and dilation of 2
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 32},
{256, 3, 32},
{0},
{1},
{1},
{2},
1
});
return problem_shapes;
}
// Specialization for 2D wgrad problems
template<>
std::vector<cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kWgrad, 2>> inline
get_conv_problem_vector<2, cutlass::conv::Operator::kWgrad>() {
using ProblemShape = cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kWgrad, 2>;
std::vector<ProblemShape> problem_shapes;
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 8, 64}, // nhwc
{64, 1, 1, 64}, // krsc
{0, 0}, // padding lower (pad_h, pad_w)
{0, 0}, // padding upper (pad_h, pad_w)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
1 // group
});
// Filter-K = 16 for predication
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 8, 64},
{16, 1, 1, 64},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// N = 2 and K = 96 for a larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 64},
{96, 1, 1, 64},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// N = 7 and K = 256 for an even larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{7, 8, 8, 64},
{256, 1, 1, 64},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// 3x3 filter, no padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 32},
{256, 3, 3, 32},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// 3x3 filter, symmetric padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 32},
{256, 3, 3, 32},
{1, 1},
{1, 1},
{1, 1},
{1, 1},
1
});
// 2x5 filter, asymmetric padding 1,0/1,0
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 32},
{256, 2, 5, 32},
{1, 1},
{0, 0},
{1, 1},
{1, 1},
1
});
// 2x5 filter, asymmetric padding 1,0/1,0, w/ stride
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 16, 32},
{256, 2, 5, 32},
{1, 1},
{0, 0},
{2, 3},
{1, 1},
1
});
// 2x5 filter, asymmetric padding 1,0/1,0, w/ dilation
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 16, 32},
{256, 2, 5, 32},
{1, 1},
{0, 0},
{1, 1},
{2, 3},
1
});
// 2x5 filter, asymmetric padding 1,0/1,0, w/ stride, w/ dilation
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 16, 32},
{256, 2, 5, 32},
{1, 1},
{0, 0},
{2, 3},
{2, 3},
1
});
return problem_shapes;
}
// Specialization for 3D wgrad problems
template<>
std::vector<cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kWgrad, 3>> inline
get_conv_problem_vector<3, cutlass::conv::Operator::kWgrad>() {
using ProblemShape = cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kWgrad, 3>;
std::vector<ProblemShape> problem_shapes;
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 1, 8, 8, 64}, // ndhwc
{64, 1, 1, 1, 64}, // ktrsc
{0, 0, 0}, // padding lower (pad_d, pad_h, pad_w)
{0, 0, 0}, // padding upper (pad_d, pad_h, pad_w)
{1, 1, 1}, // stride (stride_d, stride_h, stride_w)
{1, 1, 1}, // dilation (dilation_d, dilation_h, dilation_w)
1 // group
});
// Filter 3x3x3 + no padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 3, 5, 8, 32},
{96, 3, 3, 3, 32},
{0, 0, 0},
{0, 0, 0},
{1, 1, 1},
{1, 1, 1},
1
});
// Filter 3x4x5 + asymmetric padding 102/010
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 3, 5, 8, 32},
{96, 3, 4, 5, 32},
{1, 0, 1},
{0, 2, 0},
{1, 1, 1},
{1, 1, 1},
1
});
// Filter 3x4x5 + asymmetric padding 102/010, w/ stride
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 10, 16, 32},
{96, 3, 4, 5, 32},
{1, 0, 1},
{0, 2, 0},
{2, 2, 3},
{1, 1, 1},
1
});
// Filter 3x4x5 + asymmetric padding 102/010, w/ dilation
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 10, 16, 32},
{96, 3, 4, 5, 32},
{1, 0, 1},
{0, 2, 0},
{1, 1, 1},
{2, 2, 3},
1
});
return problem_shapes;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Dgrad
/////////////////////////////////////////////////////////////////////////////////////////////////
// Specialization for 1D dgrad problems
template<>
std::vector<cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kDgrad, 1>> inline
get_conv_problem_vector<1, cutlass::conv::Operator::kDgrad>() {
using ProblemShape = cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kDgrad, 1>;
std::vector<ProblemShape> problem_shapes;
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 64}, // nqk
{64, 1, 64}, // ksc
{0}, // padding lower (pad_w)
{0}, // padding upper (pad_w)
{1}, // stride (stride_w)
{1}, // dilation (dilation_w)
1 // group
});
// non-packed input strides.
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 64}, // nqk
{800, 80, 1}, // stride (nqk)
{64, 1, 64}, // ksc
{64, 64, 1}, // stride (ksc)
{0}, // padding lower (pad_w)
{0}, // padding upper (pad_w)
{1}, // stride (stride_w)
{1}, // dilation (dilation_w)
1 // group
});
// non-packed output strides.
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 64}, // nqk
{512, 64, 1}, // stride (nqk)
{64, 1, 64}, // ksc
{64, 64, 1}, // stride (ksc)
{800, 80, 1}, // stride (nwc)
{0}, // padding lower (pad_w)
{0}, // padding upper (pad_w)
{1}, // stride (stride_w)
{1}, // dilation (dilation_w)
1 // group
});
// Filter-K = 16 for predication
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 16},
{64,1, 16},
{0},
{0},
{1},
{1},
1
});
// N = 2 for a larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 96},
{64, 1, 96},
{0},
{0},
{1},
{1},
1
});
// N = 7 and K = 256 for an even larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{7, 8, 256},
{64, 1, 256},
{0},
{0},
{1},
{1},
1
});
// 3 filter, no padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 256},
{64, 3, 256},
{0},
{0},
{1},
{1},
1
});
// 3 filter, symmetric padding with k % cta_k !=0
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 256},
{32, 3, 256},
{1},
{1},
{1},
{1},
1
});
// 4 filter, asymmetric padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 256},
{64, 4, 256},
{0},
{1},
{1},
{1},
1
});
// 3 filter, asymmetric padding and dilation of 2
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 64},
{256, 3, 64},
{0},
{1},
{1},
{2},
1
});
return problem_shapes;
}
// Specialization for 2D dgrad problems
template<>
std::vector<cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kDgrad, 2>> inline
get_conv_problem_vector<2, cutlass::conv::Operator::kDgrad>() {
using ProblemShape = cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kDgrad, 2>;
std::vector<ProblemShape> problem_shapes;
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 8, 64}, // npqk
{64, 1, 1, 64}, // krsc
{0, 0}, // padding lower (pad_h, pad_w)
{0, 0}, // padding upper (pad_h, pad_w)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
1 // group
});
// non-packed input strides.
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 8, 64}, // npqk
{8000, 800, 80, 1}, // stride (npqk)
{64, 1, 1, 64}, // krsc
{64, 64, 64, 1}, // stride (krsc)
{0, 0}, // padding lower (pad_h, pad_w)
{0, 0}, // padding upper (pad_h, pad_w)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
1 // group
});
// non-packed output strides.
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 8, 64}, // npqk
{4096, 512, 64, 1}, // stride (npqk)
{64, 1, 1, 64}, // krsc
{64, 64, 64, 1}, // stride (krsc)
{8000, 800, 80, 1}, // stride (nhwc)
{0, 0}, // padding lower (pad_h, pad_w)
{0, 0}, // padding upper (pad_h, pad_w)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
1 // group
});
// Filter-K = 16 for predication
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 8, 8, 16},
{64, 1, 1, 16},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// N = 2 for a larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 96},
{64, 1, 1, 96},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// N = 7 and K = 256 for an even larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{7, 8, 8, 256},
{64, 1, 1, 256},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// 3x3 filter, no padding
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 256},
{64, 3, 3, 256},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
1
});
// 3x3 filter, symmetric padding with k % cta_k !=0
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 256},
{32, 3, 3, 256},
{1, 1},
{1, 1},
{1, 1},
{1, 1},
1
});
// 2x5 filter, asymmetric padding 1,0/1,0
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 8, 8, 256},
{64, 2, 5, 256},
{1, 1},
{0, 0},
{1, 1},
{1, 1},
1
});
// 2x5 filter, asymmetric padding 1,0/1,0, w/ dilation
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 16, 64},
{256, 2, 5, 64},
{1, 1},
{0, 0},
{1, 1},
{2, 3},
1
});
return problem_shapes;
}
// Specialization for 3D dgrad problems
template<>
std::vector<cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kDgrad, 3>> inline
get_conv_problem_vector<3, cutlass::conv::Operator::kDgrad>() {
using ProblemShape = cutlass::conv::ConvProblemShape<cutlass::conv::Operator::kDgrad, 3>;
std::vector<ProblemShape> problem_shapes;
// Filter-K = 16 for predication
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 1, 8, 8, 16},
{64, 1, 1, 1, 16},
{0, 0, 0},
{0, 0, 0},
{1, 1, 1},
{1, 1, 1},
1
});
// non-packed input output strides.
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{1, 1, 8, 8, 64}, // nzpqk
{8000, 8000, 800, 80, 1}, // stride (nzpqk)
{64, 1, 1, 1, 64}, // ktrsc
{64, 64, 64, 64, 1}, // stride (ktrsc)
{8000, 8000, 800, 80, 1}, // stride (ndhwc)
{0, 0, 0}, // padding lower (pad_d, pad_h, pad_w)
{0, 0, 0}, // padding upper (pad_d, pad_h, pad_w)
{1, 1, 1}, // stride (stride_d, stride_h, stride_w)
{1, 1, 1}, // dilation (dilation_d, dilation_h, dilation_w)
1 // group
});
// N = 2 for a larger grid
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 1, 8, 8, 96},
{64, 1, 1, 1, 96},
{0, 0, 0},
{0, 0, 0},
{1, 1, 1},
{1, 1, 1},
1
});
// Filter 3x4x5 + symmetric padding 111
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 3, 5, 8, 96},
{64, 3, 4, 5, 96},
{1, 1, 1},
{1, 1, 1},
{1, 1, 1},
{1, 1, 1},
1
});
// Filter 3x4x5 + asymmetric padding 102/010
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 3, 5, 8, 96},
{64, 3, 4, 5, 96},
{1, 0, 1},
{0, 2, 0},
{1, 1, 1},
{1, 1, 1},
1
});
// Filter 3x4x5 + asymmetric padding 102/010, w/ dilation
problem_shapes.push_back({
cutlass::conv::Mode::kCrossCorrelation,
{2, 16, 10, 16, 64},
{64, 3, 4, 5, 96},
{1, 0, 1},
{0, 2, 0},
{1, 1, 1},
{2, 2, 3},
1
});
return problem_shapes;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace test::conv::device
|
cutlass/test/unit/conv/device_3x/conv_problem_sizes.hpp/0
|
{
"file_path": "cutlass/test/unit/conv/device_3x/conv_problem_sizes.hpp",
"repo_id": "cutlass",
"token_count": 13923
}
| 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS host-device template for complex numbers supporting all CUTLASS numeric types.
*/
// Standard Library's std::complex<T> used for reference checking
#include <complex>
#include "../common/cutlass_unit_test.h"
#include "cutlass/complex.h"
#include "cutlass/constants.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(complex, f64_to_f32_conversion) {
cutlass::complex<double> source = {1.5, -1.25};
cutlass::complex<float> dest = cutlass::complex<float>(source); // explicit conversion
EXPECT_TRUE(source.real() == 1.5 && source.imag() == -1.25 &&
dest.real() == 1.5f && dest.imag() == -1.25f);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(complex, f32_to_f64_conversion) {
cutlass::complex<float> source = {-1.5f, 1.25f};
cutlass::complex<double> dest = source; // implicit conversion
EXPECT_TRUE(source.real() == -1.5f && source.imag() == 1.25f &&
dest.real() == -1.5 && dest.imag() == 1.25);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(complex, s32_to_f64_conversion) {
cutlass::complex<int> source = {-2, 1};
cutlass::complex<double> dest = source; // implicit conversion
EXPECT_TRUE(source.real() == -2 && source.imag() == 1 &&
dest.real() == -2 && dest.imag() == 1);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(complex, f16_to_f32_conversion) {
cutlass::complex<cutlass::half_t> source = {1.5_hf, -1.25_hf};
cutlass::complex<float> dest = cutlass::complex<float>(source); // explicit conversion
EXPECT_TRUE(source.real() == 1.5_hf && source.imag() == -1.25_hf &&
dest.real() == 1.5f && dest.imag() == -1.25f);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(complex, exp_f32) {
cutlass::complex<float> Z[] = {
{1, 1},
{2 , cutlass::constants::pi<float>()/2.0f },
{0.5f, cutlass::constants::pi<float>() },
{0.25f, cutlass::constants::pi<float>()*3/4.0f },
{0, 0},
};
cutlass::complex<double> Expected[] = {
{1.4686939399158851, 2.2873552871788423},
{4.524491950137825e-16, 7.38905609893065},
{-1.6487212707001282, 2.019101226849069e-16},
{-0.9079430793557842, 0.9079430793557843},
{1, 0}
};
double tolerance = 0.00001;
for (int i = 0; cutlass::real(Z[i]) != 0.0f; ++i) {
double e_r = cutlass::real(Expected[i]);
double e_i = cutlass::imag(Expected[i]);
cutlass::complex<float> got = cutlass::exp(Z[i]);
float g_r = cutlass::real(got);
float g_i = cutlass::imag(got);
EXPECT_TRUE(
std::abs(g_r - e_r) < tolerance && std::abs(g_i - e_i) < tolerance
) << "Expected(" << Expected[i] << "), Got(" << got << ")";
}
}
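// The Expected[] values in the test above follow from exp(x + iy) = e^x * (cos y + i sin y).
// For example, Z[0] = (1, 1) gives e^1 * cos(1) ~= 1.4686939 and e^1 * sin(1) ~= 2.2873553,
// matching Expected[0] within the stated tolerance; Z[2] = (0.5, pi) gives
// e^0.5 * cos(pi) = -1.6487213 with a near-zero imaginary part.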
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
/// Thorough testing for basic complex math operators. Uses std::complex as a reference.
template <typename T, int N, int M>
struct ComplexOperators {
ComplexOperators() {
for (int ar = -N; ar <= N; ++ar) {
for (int ai = -N; ai <= N; ++ai) {
for (int br = -N; br <= N; ++br) {
for (int bi = -N; bi <= N; ++bi) {
cutlass::complex<T> Ae(T(ar) / T(M), T(ai) / T(M));
cutlass::complex<T> Be(T(br) / T(M), T(bi) / T(M));
std::complex<T> Ar(T(ar) / T(M), T(ai) / T(M));
std::complex<T> Br(T(br) / T(M), T(bi) / T(M));
cutlass::complex<T> add_e = Ae + Be;
cutlass::complex<T> sub_e = Ae - Be;
cutlass::complex<T> mul_e = Ae * Be;
std::complex<T> add_r = (Ar + Br);
std::complex<T> sub_r = (Ar - Br);
std::complex<T> mul_r = (Ar * Br);
EXPECT_EQ(real(add_e), real(add_r));
EXPECT_EQ(imag(add_e), imag(add_r));
EXPECT_EQ(real(sub_e), real(sub_r));
EXPECT_EQ(imag(sub_e), imag(sub_r));
EXPECT_EQ(real(mul_e), real(mul_r));
EXPECT_EQ(imag(mul_e), imag(mul_r));
if (!(br == 0 && bi == 0)) {
cutlass::complex<T> div_e = Ae / Be;
std::complex<T> div_r = Ar / Br;
T const kRange = T(0.001);
EXPECT_NEAR(real(div_e), real(div_r), kRange);
EXPECT_NEAR(imag(div_e), imag(div_r), kRange);
}
}
}
}
}
}
};
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(complex, host_float) {
test::ComplexOperators<float, 32, 8> test;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(complex, host_double) {
test::ComplexOperators<double, 32, 8> test;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/test/unit/core/complex.cu/0
|
{
"file_path": "cutlass/test/unit/core/complex.cu",
"repo_id": "cutlass",
"token_count": 2565
}
| 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/copy_sm90.hpp>
using namespace cute;
template<class T>
__global__ void
stsm_test_device(uint16_t* g_in, uint16_t* g_out)
{
constexpr int count = sizeof(T) / 4;
int tid = threadIdx.x;
int stride = blockDim.x;
// load input gmem -> rmem
uint32_t reg[count];
for (int i = 0; i < (sizeof(T) / 4); i++) {
reg[i] = reinterpret_cast<uint32_t*>(g_in)[tid + (stride * i)];
}
__shared__ uint32_t smem[32 * count];
// load rmem -> smem using STSM
uint128_t* smem_ptr = reinterpret_cast<uint128_t*>(smem) + tid;
T* rmem_ptr = reinterpret_cast<T*>(reg);
cute::copy_stsm(rmem_ptr, smem_ptr);
__syncthreads();
// store output smem -> gmem
for (int i = 0; i < (sizeof(T) / 4); i++) {
reinterpret_cast<uint32_t*>(g_out)[tid + (stride * i)] = smem[tid + (stride * i)];
}
}
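// The instantiations of stsm_test_device below use T = uint32_t, uint64_t, and uint128_t,
// exercising the STSM x1 (32b), x2 (64b), and x4 (128b) per-thread variants respectively:
// count = sizeof(T) / 4 is the number of 32-bit registers handed to cute::copy_stsm per thread.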
template <class TiledCopy, class SmemLayout>
__global__ void
stsm_test_device_cute(uint16_t* g_in, uint16_t* g_out,
TiledCopy tiled_copy, SmemLayout smem_layout)
{
using namespace cute;
__shared__ uint16_t smem[size(smem_layout)];
Tensor t_g_in = make_tensor(make_gmem_ptr(g_in), smem_layout);
Tensor t_g_out = make_tensor(make_gmem_ptr(g_out), smem_layout);
Tensor t_smem = make_tensor(make_smem_ptr(smem), smem_layout);
int tid = threadIdx.x;
auto thr_copy = tiled_copy.get_thread_slice(tid);
Tensor tXgX = thr_copy.partition_S(t_g_in); // (V,M,N)
Tensor tXsX = thr_copy.partition_D(t_smem); // (V,M,N)
Tensor tXrX = make_tensor<uint16_t>(shape(tXgX)); // (V,M,N)
clear(tXrX); // Just to make sure
/*
if (thread0()) {
print("tXsX: " ); print(tXsX.layout()); print("\n");
print("tXgX: " ); print(tXgX.layout()); print("\n");
print("tXrX: " ); print(tXrX.layout()); print("\n");
}
*/
// Load input gmem -> rmem
copy(tXgX, tXrX);
// Copy rmem -> smem via tiled_copy (STSM, STS)
copy(tiled_copy, tXrX, tXsX);
// Output smem -> gmem
for (int i = tid; i < size(t_smem); i += size(tiled_copy)) {
t_g_out(i) = t_smem(i);
}
}
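// Each test case below follows the same pattern: build a TiledCopy from a store atom
// (an SM90 STSM variant, or a scalar UniversalCopy fallback), a thread layout, and a
// per-thread value layout, then launch stsm_test_device_cute with a matching smem layout.
// A representative construction, mirroring the first CuTe case below:
//
//   auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x1_STSM_N, uint16_t>{},
//                                     Layout<Shape<_32,_1>>{},   // 32 threads
//                                     Layout<Shape< _1,_8>>{});  // 8 uint16_t values per thread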
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_CuTe_Hopper, Stsm)
{
constexpr int count = 1024;
thrust::host_vector<uint16_t> h_in(count);
for (int i = 0; i < count; ++i) {
h_in[i] = uint16_t(i);
}
thrust::device_vector<uint16_t> d_in = h_in;
//
// STSM 1x (32b)
//
{
thrust::device_vector<uint16_t> d_out(count);
stsm_test_device<uint32_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 32; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("STSM 1x stsm_test_device SUCCESS\n");
}
//
// STSM 2x (64b)
//
{
thrust::device_vector<uint16_t> d_out(count);
stsm_test_device<uint64_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 64; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("STSM 2x stsm_test_device SUCCESS\n");
}
//
// STSM 4x (128b)
//
{
thrust::device_vector<uint16_t> d_out(count);
stsm_test_device<uint128_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 128; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("STSM 4x stsm_test_device SUCCESS\n");
}
//
// CuTe STSM
//
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x1_STSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x1_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x2_STSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x2_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x4_STSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x4_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<UniversalCopy<uint16_t>, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved STSM.U16 SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x1_STSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x1_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x2_STSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x2_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x4_STSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x4_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<UniversalCopy<uint16_t>, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 STSM.U16 SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U16x2_STSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_2,_1>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x2_STSM_T SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U16x4_STSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_4,_1>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x4_STSM_T SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U16x8_STSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_8,_1>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x8_STSM_T SUCCESS\n");
}
CUTLASS_TRACE_HOST("PASS");
}
#endif
|
cutlass/test/unit/cute/hopper/stsm.cu/0
|
{
"file_path": "cutlass/test/unit/cute/hopper/stsm.cu",
"repo_id": "cutlass",
"token_count": 7105
}
| 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <numeric>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
using namespace cute;
template <class GmemTensor, class RmemTiler, class CopyPolicy>
__global__
void
kernel(GmemTensor gC, RmemTiler tiler, CopyPolicy policy)
{
Tensor tCgC = local_tile(gC, tiler, 0);
Tensor rC = make_tensor_like(tCgC);
using T = typename GmemTensor::value_type;
for (int i = 0; i < size(rC); ++i) {
rC(i) = T(i % 13);
}
#if 0
print(" gC : "); print( gC); print("\n");
print("tCgC : "); print(tCgC); print("\n");
print(" rC : "); print( rC); print("\n");
#endif
// NOTE: only 1 thread; this thread produces a block of 8x8 output. The fringe will not be touched.
//copy(rC, tCgC); // Enable auto-vectorization if static
//copy_vec<T>(rC, tCgC); // Disable auto-vectorization always
copy(policy, rC, tCgC); // Use a policy to establish vectorization assumptions
}
template <class T, class CopyPolicy, class GmemLayout, class RmemTiler>
void
test_copy_vectorization(CopyPolicy policy, GmemLayout gmem_layout, RmemTiler rmem_tiler)
{
thrust::host_vector<T> h_in(cosize(gmem_layout), T(0));
thrust::device_vector<T> d_in = h_in;
Tensor m_in = make_tensor(make_gmem_ptr(raw_pointer_cast(d_in.data())), gmem_layout);
kernel<<<1,1>>>(m_in, rmem_tiler, policy);
thrust::host_vector<T> h_out = d_in;
Tensor result = make_tensor(h_out.data(), gmem_layout);
thrust::host_vector<T> h_true = h_in;
Tensor ref = make_tensor(h_true.data(), gmem_layout);
// Set the values directly in the reference tensor, no copy
Tensor ref_tile = local_tile(ref, rmem_tiler, 0);
for (int i = 0; i < size(ref_tile); ++i) {
ref_tile(i) = T(i % 13);
}
// Compare the reference and the result. Print only the first 3 errors.
// print_tensor(result);
int count = 3;
for (int i = 0; i < size(ref) && count > 0; ++i) {
EXPECT_EQ(result(i), ref(i));
if (result(i) != ref(i)) {
--count;
}
}
}
template <class T, class GmemLayout, class RmemTiler>
void
test_copy_vectorization(GmemLayout gmem_layout, RmemTiler rmem_tiler)
{
test_copy_vectorization<T>(DefaultCopy{}, gmem_layout, rmem_tiler);
}
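// The test below covers three situations: fully static gmem layouts (assumed aligned and
// therefore vectorized), dynamic layouts under the default policy (not vectorized, since
// alignment cannot be assumed), and dynamic layouts with an explicit
// AutoVectorizingCopyWithAssumedAlignment<128> policy (vectorized under the caller's
// 128-bit alignment assumption). A representative explicit-policy call, mirroring the
// test body below:
//
//   test_copy_vectorization<float>(AutoVectorizingCopyWithAssumedAlignment<128>{},
//                                  make_layout(make_shape(12, 12)), Shape<_8,_8>{});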
TEST(SM70_CuTe_Volta, SimpleVec)
{
// Fully static layouts are assumed to be aligned -- these will be vectorized
test_copy_vectorization<float>(make_layout(make_shape(Int<8>{}, Int<8>{})), Shape<_8,_8>{});
test_copy_vectorization<float>(make_layout(make_shape(Int<12>{}, Int<12>{})), Shape<_8,_8>{});
// Fails in vectorization recast due to misalignment and static assertions
//test_copy_vectorization<float>(make_layout(make_shape(Int<9>{}, Int<9>{})), Shape<_8,_8>{});
// Dynamic layouts are not assumed to be aligned -- these will not be vectorized
test_copy_vectorization<float>(make_layout(make_shape(12,12)), Shape<_8,_8>{});
test_copy_vectorization<float>(make_layout(make_shape( 9, 9)), Shape<_8,_8>{});
// Dynamic layouts that are assumed to be aligned -- these will be vectorized
test_copy_vectorization<float>(AutoVectorizingCopyWithAssumedAlignment<128>{}, make_layout(make_shape( 8, 8)), Shape<_8,_8>{});
test_copy_vectorization<float>(AutoVectorizingCopyWithAssumedAlignment<128>{}, make_layout(make_shape(12,12)), Shape<_8,_8>{});
// Fails -- bad alignment assumption
//test_copy_vectorization<float>(AutoVectorizingCopyWithAssumedAlignment<128>{}, make_layout(make_shape( 9, 9)), Shape<_8,_8>{});
}
|
cutlass/test/unit/cute/volta/vectorization_auto.cu/0
|
{
"file_path": "cutlass/test/unit/cute/volta/vectorization_auto.cu",
"repo_id": "cutlass",
"token_count": 1836
}
| 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_warp_FragmentIterator, mma_f32_64x64x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
Shape,
typename MmaTensorOp::Policy::Operator::Shape,
typename MmaTensorOp::Policy::Operator::ElementC,
typename MmaTensorOp::Policy::Operator::FragmentC,
cutlass::layout::RowMajor
>;
// This test just prints things.
#if 0
typename MmaTensorOp::FragmentC accum;
std::cout << "Native accumulators:\n";
for (int i = 0; i < MmaTensorOp::FragmentC::kElements; ++i) {
accum[i] = ElementC(i);
std::cout << accum[i] << " ";
if (i && !((i + 1) % 4)) {
std::cout << "\n";
}
}
std::cout << std::endl;
std::cout << "FragmentIterator::Policy = { \n"
<< " kAccessesPerInstruction: " << FragmentIterator::Policy::kIterationsPerInstruction << "\n"
<< " kAccumulatorRowStride: " << FragmentIterator::Policy::kAccumulatorRowStride << "\n"
<< " kAccumulatorColumnStride: " << FragmentIterator::Policy::kAccumulatorColumnStride << "\n"
<< " kIterations: " << FragmentIterator::Policy::kIterations << "\n"
<< " }" << std::endl;
FragmentIterator fragment_iterator(accum);
for (int iter = 0; iter < FragmentIterator::kIterations; ++iter) {
typename FragmentIterator::Fragment frag;
fragment_iterator.load(frag);
std::cout << "Iteration " << iter << ":\n";
for (int i = 0; i < FragmentIterator::Fragment::kElements; ++i) {
std::cout << frag[i] << " ";
}
std::cout << std::endl;
++fragment_iterator;
}
#endif
}
TEST(SM75_Epilogue_warp_FragmentIterator, mma_f16_64x64x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
Shape,
typename MmaTensorOp::Policy::Operator::Shape,
typename MmaTensorOp::Policy::Operator::ElementC,
typename MmaTensorOp::Policy::Operator::FragmentC,
cutlass::layout::RowMajor
>;
  // This test only prints diagnostic output; its body is disabled by default via #if 0.
#if 0
typename MmaTensorOp::FragmentC accum;
std::cout << "Native accumulators:\n";
for (int i = 0; i < MmaTensorOp::FragmentC::kElements; ++i) {
accum[i] = ElementC(i);
std::cout << (float)accum[i] << " ";
if (i && !((i + 1) % 4)) {
std::cout << "\n";
}
}
std::cout << std::endl;
std::cout << "FragmentIterator::Policy = { \n"
<< " kAccessesPerInstruction: " << FragmentIterator::Policy::kIterationsPerInstruction << "\n"
<< " kAccumulatorRowStride: " << FragmentIterator::Policy::kAccumulatorRowStride << "\n"
<< " kAccumulatorColumnStride: " << FragmentIterator::Policy::kAccumulatorColumnStride << "\n"
<< " kIterations: " << FragmentIterator::Policy::kIterations << "\n"
<< " }" << std::endl;
FragmentIterator fragment_iterator(accum);
for (int iter = 0; iter < FragmentIterator::kIterations; ++iter) {
typename FragmentIterator::Fragment frag;
fragment_iterator.load(frag);
std::cout << "Iteration " << iter << ":\n";
for (int i = 0; i < FragmentIterator::Fragment::kElements; ++i) {
std::cout << (float)frag[i] << " ";
}
std::cout << std::endl;
++fragment_iterator;
}
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/test/unit/epilogue/warp/fragment_iterator_tensor_op.cu/0
|
{
"file_path": "cutlass/test/unit/epilogue/warp/fragment_iterator_tensor_op.cu",
"repo_id": "cutlass",
"token_count": 2367
}
| 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Testbed for the CUTLASS 3.x device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <random>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gett.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/fusion/operations.hpp"
#include "cutlass/complex.h"
#include "testbed_utils.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/gemm.h"
#include "cute/int_tuple.hpp"
#include "cute/layout.hpp"
#include "cute/numeric/int.hpp"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
enum class ScalarLoc {
ON_HOST = 0,
ON_DEVICE = 1
};
enum class VectorBeta {
DISABLED = 0,
ENABLED = 1
};
enum class CheckEquality {
EXACT = 0,
RELATIVE = 1
};
namespace detail {
// Helper traits that fall back to a default data type when
// Gemm::EpilogueOutputOp does not define ElementCompute or
// ElementScalar.
// (e.g. when Sm90TreeVisitor is used as FusionCallbacks)
template <typename Gemm, typename Default, typename = void>
struct ElementComputeType {
using Type = Default;
};
template <typename Gemm, typename Default>
struct ElementComputeType<Gemm, Default, std::void_t<typename Gemm::EpilogueOutputOp::ElementCompute>> {
using Type = typename Gemm::EpilogueOutputOp::ElementCompute;
};
template <typename Gemm, typename Default, typename = void>
struct ElementScalarType {
using Type = Default;
};
template <typename Gemm, typename Default>
struct ElementScalarType<Gemm, Default, std::void_t<typename Gemm::EpilogueOutputOp::ElementScalar>> {
using Type = typename Gemm::EpilogueOutputOp::ElementScalar;
};
// The maximum swizzle size to use
//
// This class, like Splits below, makes it harder to confuse
// the order of arguments of the various run(...) functions in this file.
class MaxSwizzleSize {
public:
MaxSwizzleSize() = default;
template<class IntegralNotBool,
__CUTE_REQUIRES((std::is_integral_v<IntegralNotBool> &&
!cute::is_same_v<IntegralNotBool, bool>)) >
explicit MaxSwizzleSize(IntegralNotBool max_swizzle_size) : max_swizzle_size_(max_swizzle_size) {}
explicit operator int() const { return max_swizzle_size_; }
private:
int max_swizzle_size_ = 1;
};
template <typename T>
auto make_iterator(T* ptr) {
using namespace cute;
if constexpr (cute::is_subbyte_v<T>) {
return subbyte_iterator<T>(ptr);
}
else {
return ptr;
}
}
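// Illustrative behavior sketch (the pointers below are hypothetical): ordinary element
// types pass through as raw pointers, while sub-byte types are wrapped so the host
// reference can address individual elements.
//
//   float* p_f32 = /* ... */;            // make_iterator(p_f32) -> float*
//   cutlass::int4b_t* p_s4 = /* ... */;  // make_iterator(p_s4)  -> cute::subbyte_iterator<cutlass::int4b_t>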
template<class T>
struct IsDefaultEpilogue {
static constexpr bool value = false;
};
template<class ...args>
struct IsDefaultEpilogue<cutlass::epilogue::collective::DefaultEpilogue<args...>> {
static constexpr bool value = true;
};
template<class ...args>
struct IsDefaultEpilogue<cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<args...>> {
static constexpr bool value = true;
};
// The number of splits to test.
//
// This class makes it harder to confuse the order of arguments
// of the various run(...) functions in this file. The constructor
// is explicit, so one can't just type 42 (or false, which the
// compiler unhelpfully turns into 0); one has to type Splits(42).
// Splits() picks the default number of splits, 1.
//
// The conversion-to-int operator (operator int()) MUST be explicit!
// Conversion to int MUST require static_cast<int>.
// Otherwise, that defeats a key purpose of this class,
// which is to catch common errors of confusing the order
// of function arguments.
class Splits {
public:
Splits() = default;
template<class IntegralNotBool,
__CUTE_REQUIRES((std::is_integral_v<IntegralNotBool> &&
!cute::is_same_v<IntegralNotBool, bool>)) >
explicit Splits(IntegralNotBool splits) : splits_(splits) {}
explicit operator int() const { return splits_; }
private:
int splits_ = 1;
};
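// Illustrative usage sketch (not exercised directly here): both construction and
// conversion must be spelled out, so mixing up argument order fails to compile.
// MaxSwizzleSize above and Iterations below follow the same pattern.
//
//   Splits splits(4);                           // OK: explicit construction
//   int num_splits = static_cast<int>(splits);  // OK: explicit conversion
//   // int bad = splits;                        // error: conversion is explicit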
// The number of iterations to test.
//
// This class, like Splits above, makes it harder to confuse
// the order of arguments of the various run(...) functions in this file.
// Iterations() picks the default number of iterations, 20.
class Iterations {
public:
Iterations() = default;
template<class IntegralNotBool,
__CUTE_REQUIRES((std::is_integral_v<IntegralNotBool> &&
!cute::is_same_v<IntegralNotBool, bool>)) >
explicit Iterations(IntegralNotBool iterations) : iterations_(iterations) {}
explicit operator int() const { return iterations_; }
private:
int iterations_ = 20;
};
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
}
else if (bits_input <= 8) {
scope_max = 1;
scope_min = -1;
}
else{
scope_max = 4;
scope_min = -4;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else if (dist_kind == cutlass::Distribution::AllOnes) {
cutlass::reference::host::TensorFill(view, Element(1));
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
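// Illustrative call (a sketch; the extent below is hypothetical and the testbed invokes
// this through the HostCollective* helpers rather than directly):
//
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> tensor({128, 128});
//   bool ok = initialize_tensor(tensor.host_view(), cutlass::Distribution::Uniform, /*seed=*/2023);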
// Inspects a CuTe stride to check whether the layout is row- or column-major
template<typename Stride>
static constexpr bool is_row_or_col_major(){
int stride_0 = int(cute::size<0>(Stride{}));
int stride_1 = int(cute::size<1>(Stride{}));
int depth = cute::depth(Stride{});
return ((stride_0 == 1) || (stride_1 == 1)) && (depth == 1);
}
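// Illustrative check (a sketch): a packed rank-3 GEMM stride is flat (depth 1) with a
// unit stride in one of its first two modes, so it passes; nested/blocked strides
// (depth > 1) are rejected.
//
//   // static_assert(is_row_or_col_major<cutlass::gemm::TagToStrideA_t<cutlass::layout::RowMajor>>());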
//
// Default MMA input Operands : A , B
//
template<
class ScheduleType_,
class Gemm,
class ElementA_ = typename Gemm::GemmKernel::ElementA,
class ElementB_ = typename Gemm::GemmKernel::ElementB>
struct HostCollectiveMainloop {
// Kernel data types
using ElementA = ElementA_;
using StrideA = typename Gemm::GemmKernel::StrideA;
using ElementB = ElementB_;
using StrideB = typename Gemm::GemmKernel::StrideB;
using ScheduleType = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy::Schedule;
using LayoutTagA = cutlass::detail::StrideToLayoutTagA_t<StrideA>;
using LayoutTagB = cutlass::detail::StrideToLayoutTagB_t<StrideB>;
using ElementAccumulator = typename Gemm::GemmKernel::ElementAccumulator;
using ElementScalingFactor = ElementAccumulator;
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
using EpilogueOutputOp = typename Gemm::EpilogueOutputOp;
using Arguments = typename Gemm::GemmKernel::MainloopArguments;
cutlass::ComplexTransform TransformA = Gemm::kTransformA;
cutlass::ComplexTransform TransformB = Gemm::kTransformB;
StrideA stride_a;
StrideB stride_b;
typename LayoutTagA::Stride stride_factor_A;
typename LayoutTagB::Stride stride_factor_B;
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::HostTensor<ElementA, LayoutTagA> tensor_A;
cutlass::HostTensor<ElementB, LayoutTagB> tensor_B;
// Whether to use relative equality checks
CheckEquality check_relative_equality = CheckEquality::EXACT;
uint64_t seed;
static constexpr uint64_t kDefaultSeed = 4096;
  // Note: this limitation comes from the testbed, not the library
  static_assert(is_row_or_col_major<StrideA>(),
    "ERROR : A Layout is neither Row nor Column Major");
  static_assert(is_row_or_col_major<StrideB>(),
    "ERROR : B Layout is neither Row nor Column Major");
HostCollectiveMainloop(
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed,
typename LayoutTagA::Stride stride_factor_A_ = typename LayoutTagA::Stride(),
typename LayoutTagB::Stride stride_factor_B_ = typename LayoutTagB::Stride()
):
stride_factor_A(stride_factor_A_),
stride_factor_B(stride_factor_B_),
init_A(init_A_), init_B(init_B_), seed(seed_),
check_relative_equality(check_relative_equality_) { }
template<class ProblemShapeType>
bool initialize(ProblemShapeType problem_size) {
//
// Allocate the GEMM workspace
//
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::size<0>(problem_shape_MNKL);
auto N = cute::size<1>(problem_shape_MNKL);
auto K = cute::size<2>(problem_shape_MNKL);
auto L = cute::size<3>(problem_shape_MNKL);
stride_a = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L));
stride_b = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L));
    // 2.x host tensor does not natively contain a batch stride or coord, so we spoof it by folding it into the outer mode
    auto a_coord = cutlass::make_Coord(M * L, K);
    // CUTLASS Row/Col major refers to the MxK times KxN matrix product,
    // so HostTensor B should be treated as KxN in "coord"'s view
    auto b_coord = cutlass::make_Coord(K, N * L);
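    // For example, with (M, K, L) = (64, 32, 2) the host-side A is allocated as a single
    // (M * L) x K = 128 x 32 matrix and B as a K x (N * L) matrix, while stride_a / stride_b
    // still describe the batched (M, K, L) / (N, K, L) views used by the host reference.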
tensor_A.resize(a_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagA>::layout_factory(a_coord, stride_factor_A));
tensor_B.resize(b_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagB>::layout_factory(b_coord, stride_factor_B));
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2022));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2021));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = ElementA(1);
tensor_B.host_view().at({0, 0}) = ElementB(1);
tensor_A.sync_device();
tensor_B.sync_device();
return true;
}
Arguments to_args() {
Arguments arguments =
{
tensor_A.device_data(), stride_a, tensor_B.device_data(), stride_b
};
return arguments;
}
auto to_host_args(ProblemShapeType problem_size) {
using namespace cute;
//
// Allocate the GEMM workspace
//
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::size<0>(problem_shape_MNKL);
auto N = cute::size<1>(problem_shape_MNKL);
auto K = cute::size<2>(problem_shape_MNKL);
auto L = cute::size<3>(problem_shape_MNKL);
auto A = make_tensor(make_iterator(tensor_A.host_data()),
make_layout(make_shape(M, K, L), stride_a));
auto B = make_tensor(make_iterator(tensor_B.host_data()),
make_layout(make_shape(N, K, L), stride_b));
cutlass::reference::host::GettMainloopParams<ElementAccumulator,
decltype(A),
decltype(B)
> mainloop_params{};
mainloop_params.A = A;
mainloop_params.B = B;
mainloop_params.transform_A = TransformA;
mainloop_params.transform_B = TransformB;
return mainloop_params;
}
void print_tensors(std::ofstream& file) {
file << "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view();
}
template <
class Element,
class Layout
>
bool equality_check(
cutlass::TensorView<Element, Layout> const& lhs,
cutlass::TensorView<Element, Layout> const& rhs) const {
// Factors used for calculating relative equality. CUTLASS's relative-equality
// checks in include/cutlass/relatively_equal.h are inspired by
// https://floating-point-gui.de/errors/comparison/. This reference suggests using
// the minimum normal value of a given type as the nonzero_floor.
Element epsilon(static_cast<Element>(0.1f));
Element nonzero_floor(std::numeric_limits<Element>::min());
if constexpr (!cutlass::is_complex<Element>::value) {
if (check_relative_equality == CheckEquality::RELATIVE) {
return cutlass::reference::host::TensorRelativelyEquals(
lhs, rhs, epsilon, nonzero_floor);
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
bool compare_reference(
cute::Shape<int,int,int,int> problem_shape_MNKL) {
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
bool passed = true;
return passed;
}
};
template<class Gemm>
struct HostCollectiveDefaultEpilogue {
  // Fusion types are potentially void if the fusion is not supported;
  // this helper keeps us from constructing HostTensor with a void type
template <typename T, typename U = uint8_t>
using non_void_t = cute::conditional_t<cute::is_void_v<T>, U, T>;
using ScheduleType = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy::Schedule;
using kernel = typename Gemm::GemmKernel;
using Epilogue = typename kernel::CollectiveEpilogue;
using ElementD = typename kernel::ElementD;
using StrideD = typename kernel::StrideD;
using ElementC = non_void_t<typename kernel::ElementC, ElementD>;
using StrideC = typename kernel::StrideC;
using FusionOp = typename Gemm::EpilogueOutputOp;
  static_assert(rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]");
  static_assert(rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]");
  static_assert(is_row_or_col_major<StrideC>(),
    "ERROR : C Layout is neither Row nor Column Major");
  static_assert(is_row_or_col_major<StrideD>(),
    "ERROR : D Layout is neither Row nor Column Major");
// Deduce Cutlass Layouts (RowMajor & ColumnMajor)
using LayoutTagC = cutlass::detail::StrideToLayoutTagC_t<StrideC>;
using LayoutTagD = cutlass::detail::StrideToLayoutTagC_t<StrideD>;
using LayoutTagScalar = cutlass::layout::PackedVectorLayout; // scalars are size-1 vectors
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
using ElementAccumulator = typename kernel::ElementAccumulator;
using ElementScalingFactor = ElementAccumulator;
using ProblemShapeType = typename kernel::ProblemShape;
using ElementCompute = typename ElementComputeType<Gemm, ElementAccumulator>::Type;
using ElementScalar = typename ElementScalarType<Gemm, ElementCompute>::Type;
using Arguments = typename Gemm::GemmKernel::EpilogueArguments;
/// Initialization
StrideC stride_c;
StrideD stride_d;
typename LayoutTagC::Stride stride_factor_C;
typename LayoutTagD::Stride stride_factor_D;
cutlass::HostTensor<ElementC, LayoutTagC> tensor_C;
// Inputs
ElementScalar alpha;
ElementScalar beta;
cutlass::HostTensor<ElementD, LayoutTagD> tensor_D;
cutlass::HostTensor<ElementD, LayoutTagD> reference_D;
// Whether to use relative equality checks
CheckEquality check_relative_equality = CheckEquality::EXACT;
  // Whether scalars are copied to device memory before kernel launch
  ScalarLoc use_device_scalars = ScalarLoc::ON_HOST;
  // If per-row scale is enabled and vector beta is disabled, beta is passed as a host scalar instead of a device vector
  VectorBeta disable_vector_beta = VectorBeta::DISABLED;
cutlass::Distribution::Kind init_C;
uint64_t seed;
static constexpr uint64_t kDefaultSeed = 4096;
HostCollectiveDefaultEpilogue(
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
ScalarLoc use_device_scalars_ = ScalarLoc::ON_HOST,
VectorBeta disable_vector_beta_ = VectorBeta::DISABLED,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed
): init_C(init_C_), seed(seed_),
stride_factor_C(typename LayoutTagC::Stride()),
stride_factor_D(typename LayoutTagD::Stride()),
check_relative_equality(check_relative_equality_),
use_device_scalars(use_device_scalars_){ }
bool initialize(ProblemShapeType problem_size, ElementScalar alpha_=1.f, ElementScalar beta_=0.f) {
// Initialize Epilogue tensors
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto [M, N, K, L] = problem_shape_MNKL;
stride_c = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L));
stride_d = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L));
    // 2.x host tensor does not natively contain a batch stride or coord, so we spoof it by folding it into the outer mode
auto c_coord = cutlass::make_Coord(M * L, N);
tensor_C.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagC>::layout_factory(c_coord, stride_factor_C));
tensor_D.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, stride_factor_D));
reference_D.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, stride_factor_D), false);
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2020));
tensor_C.host_view().at({0, 0}) = ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_C.sync_device();
tensor_D.sync_device();
alpha = alpha_;
beta = beta_;
return true;
}
template <
class Element,
class Layout
>
bool equality_check(
cutlass::TensorView<Element, Layout> const& lhs,
cutlass::TensorView<Element, Layout> const& rhs) const {
// Factors used for calculating relative equality. CUTLASS's relative-equality
// checks in include/cutlass/relatively_equal.h are inspired by
// https://floating-point-gui.de/errors/comparison/. This reference suggests using
// the minimum normal value of a given type as the nonzero_floor.
Element epsilon(static_cast<Element>(0.1f));
Element nonzero_floor(std::numeric_limits<Element>::min());
if constexpr (!cutlass::is_complex<Element>::value) {
if (check_relative_equality == CheckEquality::RELATIVE) {
return cutlass::reference::host::TensorRelativelyEquals(
lhs, rhs, epsilon, nonzero_floor);
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
bool compare_reference(
cute::Shape<int,int,int,int> problem_shape_MNKL,
ElementScalar alpha,
ElementScalar beta) {
auto [M, N, K, L] = problem_shape_MNKL;
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1) {
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
}
if (reference_D.size() > 1) {
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
}
bool passed = equality_check(reference_D.host_view(), tensor_D.host_view());
if(!passed) {
std::cout<<"D is incorrect"<<std::endl;
}
return passed;
}
void print_tensors(std::ofstream& file) {
file
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\n\nComputed =\n" << tensor_D.host_view();
}
Arguments to_args(ProblemShapeType problem_size) {
Arguments arguments =
{
{alpha, beta},
tensor_C.device_data(), stride_c, tensor_D.device_data(), stride_d
};
return arguments;
}
auto to_host_args(ProblemShapeType problem_size) {
using namespace cute;
//
// Allocate the GEMM workspace
//
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::get<0>(problem_shape_MNKL);
auto N = cute::get<1>(problem_shape_MNKL);
auto K = cute::get<2>(problem_shape_MNKL);
auto L = cute::get<3>(problem_shape_MNKL);
auto coord_0 = cutlass::make_Coord(0);
auto C = cute::make_tensor(detail::make_iterator(tensor_C.host_data()),
cute::make_layout(cute::make_shape(M, N, L), stride_c));
auto D = cute::make_tensor(detail::make_iterator(reference_D.host_data()),
cute::make_layout(cute::make_shape(M, N, L), stride_d));
cutlass::reference::host::GettEpilogueParams<
ElementScalar,
ElementScalar,
ElementAccumulator,
ElementCompute,
decltype(C),
decltype(D)>
epilogue_params{};
epilogue_params.C = C;
epilogue_params.D = D;
epilogue_params.alpha = alpha;
epilogue_params.beta = beta;
return epilogue_params;
}
};
template<class Gemm>
struct HostCollectiveEpilogue {
  // Fusion types are potentially void if the fusion is not supported;
  // this helper keeps us from constructing HostTensor with a void type
template <typename T, typename U = uint8_t>
using non_void_t = cute::conditional_t<cute::is_void_v<T>, U, T>;
using ScheduleType = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy::Schedule;
using kernel = typename Gemm::GemmKernel;
using Epilogue = typename kernel::CollectiveEpilogue;
static_assert(IsDefaultEpilogue<Epilogue>::value == false, "Default Epilogue is not supported");
using ElementD = typename kernel::ElementD;
using StrideD = typename kernel::StrideD;
using ElementC = non_void_t<typename kernel::ElementC, ElementD>;
using StrideC = typename kernel::StrideC;
  static_assert(rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]");
  static_assert(rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]");
  static_assert(is_row_or_col_major<StrideC>(),
    "ERROR : C Layout is neither Row nor Column Major");
  static_assert(is_row_or_col_major<StrideD>(),
    "ERROR : D Layout is neither Row nor Column Major");
// Deduce Cutlass Layouts (RowMajor & ColumnMajor)
using LayoutTagC = cutlass::detail::StrideToLayoutTagC_t<StrideC>;
using LayoutTagD = cutlass::detail::StrideToLayoutTagC_t<StrideD>;
using LayoutTagScalar = cutlass::layout::PackedVectorLayout; // scalars are size-1 vectors
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
using ElementAccumulator = typename kernel::ElementAccumulator;
using ElementScalingFactor = ElementAccumulator;
using ProblemShapeType = typename kernel::ProblemShape;
//
// FusionOperation derived types/queries
//
using EpiloguePolicy = typename Epilogue::DispatchPolicy;
static constexpr bool IsLegacy =
cute::is_same_v<
EpiloguePolicy,
cutlass::epilogue::Sm90TmaWarpSpecializedBiasElementwise<
EpiloguePolicy::StagesC, EpiloguePolicy::StagesD, EpiloguePolicy::FragmentSize>
>;
using FusionOp = typename Gemm::EpilogueOutputOp;
static_assert(cute::is_base_of_v<cutlass::epilogue::fusion::FusionOperation, FusionOp>);
using ElementCompute = typename FusionOp::ElementCompute;
using ElementScalar = typename FusionOp::ElementScalar;
using ElementBias = non_void_t<typename FusionOp::ElementBias>;
using ElementAux = non_void_t<typename FusionOp::ElementAux>;
using ElementAmax = non_void_t<typename FusionOp::ElementAmax>;
using LayoutTagAux = non_void_t<typename FusionOp::GmemLayoutTagAux, LayoutTagD>;
using ActivationFunctor = non_void_t<typename FusionOp::ActivationFn,
cutlass::epilogue::thread::Identity<ElementCompute>>;
static constexpr bool IsBiasEnabled = FusionOp::IsPerRowBiasSupported;
static constexpr bool IsDeBiasEnabled = FusionOp::IsDePerRowBiasSupported;
static constexpr bool IsPerRowScaleEnabled = FusionOp::IsPerRowScaleSupported;
static constexpr bool IsScaleFactorEnabled = FusionOp::IsScaleFactorSupported;
static constexpr bool IsAuxInEnabled = FusionOp::IsAuxInSupported;
static constexpr bool IsAuxOutEnabled = FusionOp::IsAuxOutSupported;
static constexpr bool IsAbsMaxEnabledD = FusionOp::IsAbsMaxSupported &&
(cute::is_same_v<ElementD, cutlass::float_e4m3_t> ||
cute::is_same_v<ElementD, cutlass::float_e5m2_t>);
static constexpr bool IsAbsMaxEnabledAux = IsAuxOutEnabled && FusionOp::IsAbsMaxSupported &&
(cute::is_same_v<ElementAux, cutlass::float_e4m3_t> ||
cute::is_same_v<ElementAux, cutlass::float_e5m2_t>);
using Arguments = typename Gemm::GemmKernel::EpilogueArguments;
/// Initialization
StrideC stride_c;
StrideD stride_d;
typename LayoutTagC::Stride stride_factor_C;
typename LayoutTagD::Stride stride_factor_D;
// Inputs
cutlass::HostTensor<ElementScalar, LayoutTagScalar> alpha;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> beta;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> scale_A;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> scale_B;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> scale_C;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> scale_D;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> scale_Aux;
cutlass::HostTensor<ElementBias , LayoutTagVector> bias;
cutlass::HostTensor<ElementC, LayoutTagC> tensor_C;
cutlass::HostTensor<ElementCompute, LayoutTagScalar> norm_constant;
// Outputs
cutlass::HostTensor<ElementAmax, LayoutTagScalar> abs_max_Aux;
cutlass::HostTensor<ElementAmax, LayoutTagScalar> abs_max_D;
cutlass::HostTensor<ElementAux , LayoutTagAux > tensor_Aux;
cutlass::gemm::TagToStrideC_t< LayoutTagAux > stride_Aux;
cutlass::HostTensor<ElementD, LayoutTagD> tensor_D;
cutlass::HostTensor<ElementD, LayoutTagD> reference_D;
// References
cutlass::HostTensor<ElementBias, LayoutTagVector> reference_dbias;
cutlass::HostTensor<ElementAux , LayoutTagAux > reference_Aux;
cutlass::HostTensor<ElementAmax, LayoutTagScalar> reference_abs_max_Aux;
cutlass::HostTensor<ElementAmax, LayoutTagScalar> reference_abs_max_D;
// Whether to use relative equality checks
CheckEquality check_relative_equality = CheckEquality::EXACT;
  // Whether scalars are copied to device memory before kernel launch
  ScalarLoc use_device_scalars = ScalarLoc::ON_HOST;
  // If per-row scale is enabled and vector beta is disabled, beta is passed as a host scalar instead of a device vector
  VectorBeta disable_vector_beta = VectorBeta::DISABLED;
// Random distribution with which to initialize the A/B/C/D/Aux scaling factors
cutlass::Distribution::Kind init_scale = cutlass::Distribution::Uniform;
// Random distribution with which to initialize the bias vector
cutlass::Distribution::Kind init_bias = cutlass::Distribution::Uniform;
cutlass::Distribution::Kind init_C;
uint64_t seed;
static constexpr uint64_t kDefaultSeed = 4096;
HostCollectiveEpilogue(
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
ScalarLoc use_device_scalars_ = ScalarLoc::ON_HOST,
VectorBeta disable_vector_beta_ = VectorBeta::DISABLED,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed
): init_scale(init_scale_), init_bias(init_bias_),
init_C(init_C_), seed(seed_),
stride_factor_C(typename LayoutTagC::Stride()),
stride_factor_D(typename LayoutTagD::Stride()),
check_relative_equality(check_relative_equality_),
use_device_scalars(use_device_scalars_){ }
bool initialize(ProblemShapeType problem_size, ElementScalar alpha_=1.f, ElementScalar beta_=0.f) {
// Initialize Epilogue tensors
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::size<0>(problem_shape_MNKL);
auto N = cute::size<1>(problem_shape_MNKL);
auto K = cute::size<2>(problem_shape_MNKL);
auto L = cute::size<3>(problem_shape_MNKL);
stride_c = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L));
stride_d = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L));
    // 2.x host tensor does not natively contain a batch stride or coord, so we spoof it by folding it into the outer mode
auto c_coord = cutlass::make_Coord(M * L, N);
tensor_C.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagC>::layout_factory(c_coord, stride_factor_C));
tensor_D.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, stride_factor_D));
reference_D.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, stride_factor_D), false);
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2020));
tensor_C.host_view().at({0, 0}) = ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_C.sync_device();
tensor_D.sync_device();
auto scalar_coord = cutlass::make_Coord(1);
auto col_vector_coord = cutlass::make_Coord(M);
if constexpr (IsPerRowScaleEnabled) {
alpha.resize(col_vector_coord);
EXPECT_TRUE(initialize_tensor(alpha.host_view(), init_scale, seed + 2023));
if (disable_vector_beta == VectorBeta::DISABLED) {
beta.resize(scalar_coord, false);
cutlass::reference::host::TensorFill(beta.host_view(), beta_);
}
else {
beta.resize(col_vector_coord);
EXPECT_TRUE(initialize_tensor(beta.host_view(), init_scale, seed + 2024));
}
}
else {
alpha.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
beta.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
cutlass::reference::host::TensorFill(alpha.host_view(), alpha_);
cutlass::reference::host::TensorFill(beta.host_view(), beta_);
}
alpha.sync_device();
beta.sync_device();
if constexpr (IsScaleFactorEnabled) {
scale_A.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
scale_B.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
scale_C.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
scale_D.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
EXPECT_TRUE(initialize_tensor(scale_A.host_view(), init_scale, seed + 2023));
EXPECT_TRUE(initialize_tensor(scale_B.host_view(), init_scale, seed + 2024));
EXPECT_TRUE(initialize_tensor(scale_C.host_view(), init_scale, seed + 2025));
EXPECT_TRUE(initialize_tensor(scale_D.host_view(), init_scale, seed + 2026));
scale_A.sync_device();
scale_B.sync_device();
scale_C.sync_device();
scale_D.sync_device();
}
if constexpr (IsBiasEnabled) {
bias.resize(col_vector_coord);
EXPECT_TRUE(initialize_tensor(bias.host_view(), init_bias, seed + 2023));
bias.sync_device();
}
if constexpr (IsDeBiasEnabled) {
bias.resize(col_vector_coord);
reference_dbias.resize(col_vector_coord);
cutlass::reference::host::TensorFill(bias.host_view(), ElementBias(0));
cutlass::reference::host::TensorFill(reference_dbias.host_view(), ElementBias(0));
bias.sync_device();
}
if constexpr (IsAbsMaxEnabledD) {
abs_max_D.resize(scalar_coord);
// ensure in-place device reductions perform their own initialization
cutlass::reference::host::TensorFill(abs_max_D.host_view(),
CUTLASS_STL_NAMESPACE::numeric_limits<ElementAmax>::max());
abs_max_D.sync_device();
reference_abs_max_D.resize(scalar_coord);
cutlass::reference::host::TensorFill(reference_abs_max_D.host_view(), ElementAmax(0));
}
if constexpr (IsAuxInEnabled) {
auto aux_coord = cutlass::make_Coord(M * L, N);
auto aux_layout = cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(aux_coord, typename LayoutTagAux::Stride{});
tensor_Aux.resize(aux_coord, aux_layout);
EXPECT_TRUE(initialize_tensor(tensor_Aux.host_view(), init_C, seed + 2023));
tensor_Aux.sync_device();
stride_Aux = cutlass::make_cute_packed_stride(cutlass::gemm::TagToStrideC_t<LayoutTagAux>{}, cute::make_shape(M, N, L));
}
if constexpr (IsAuxOutEnabled) {
auto aux_coord = cutlass::make_Coord(M * L, N);
auto aux_layout = cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(aux_coord, typename LayoutTagAux::Stride{});
tensor_Aux.resize(aux_coord, aux_layout);
reference_Aux.resize(aux_coord, aux_layout, false);
tensor_Aux.sync_device();
stride_Aux = cutlass::make_cute_packed_stride(cutlass::gemm::TagToStrideC_t<LayoutTagAux>{}, cute::make_shape(M, N, L));
if constexpr (IsScaleFactorEnabled) {
scale_Aux.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
EXPECT_TRUE(initialize_tensor(scale_Aux.host_view(), init_scale, seed + 2027));
scale_Aux.sync_device();
}
if constexpr (IsAbsMaxEnabledAux) {
abs_max_Aux.resize(scalar_coord);
// ensure in-place device reductions perform their own initialization
cutlass::reference::host::TensorFill(abs_max_Aux.host_view(),
CUTLASS_STL_NAMESPACE::numeric_limits<ElementAmax>::max());
abs_max_Aux.sync_device();
reference_abs_max_Aux.resize(scalar_coord);
cutlass::reference::host::TensorFill(reference_abs_max_Aux.host_view(), ElementAmax(0));
}
}
return true;
}
template <
class Element,
class Layout
>
bool equality_check(
cutlass::TensorView<Element, Layout> const& lhs,
cutlass::TensorView<Element, Layout> const& rhs) const {
// Factors used for calculating relative equality. CUTLASS's relative-equality
// checks in include/cutlass/relatively_equal.h are inspired by
// https://floating-point-gui.de/errors/comparison/. This reference suggests using
// the minimum normal value of a given type as the nonzero_floor.
Element epsilon(static_cast<Element>(0.1f));
Element nonzero_floor(std::numeric_limits<Element>::min());
if constexpr (!cutlass::is_complex<Element>::value) {
if (check_relative_equality == CheckEquality::RELATIVE) {
return cutlass::reference::host::TensorRelativelyEquals(
lhs, rhs, epsilon, nonzero_floor);
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
bool compare_reference(
cute::Shape<int,int,int,int> problem_shape_MNKL,
ElementScalar alpha,
ElementScalar beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1) {
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
}
if (reference_D.size() > 1) {
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
}
bool passed = equality_check(reference_D.host_view(), tensor_D.host_view());
if(!passed) {
std::cout<<"D is incorrect"<<std::endl;
}
if constexpr (IsAbsMaxEnabledD) {
abs_max_D.sync_host();
passed &= equality_check(reference_abs_max_D.host_view(), abs_max_D.host_view());
}
if constexpr (IsDeBiasEnabled) {
bias.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(bias.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_dbias.host_view()), 0);
passed &= equality_check(reference_dbias.host_view(), bias.host_view());
}
if constexpr (IsAuxOutEnabled) {
tensor_Aux.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Aux.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_Aux.host_view()), 0);
passed &= equality_check(reference_Aux.host_view(), tensor_Aux.host_view());
if(!passed) {
std::cout<<"Aux is incorrect"<<std::endl;
}
if constexpr (IsAbsMaxEnabledAux) {
abs_max_Aux.sync_host();
bool tmp = equality_check(reference_abs_max_Aux.host_view(), abs_max_Aux.host_view());
if(!tmp) {
std::cout<<"AbsMax of Aux is incorrect"<<std::endl;
}
passed &= tmp;
}
}
return passed;
}
void print_tensors(std::ofstream& file) {
auto coord_0 = cutlass::make_Coord(0);
if constexpr (IsScaleFactorEnabled) {
file
<< ", scale_a: " << scale_A.at(coord_0)
<< ", scale_b: " << scale_B.at(coord_0)
<< ", scale_c: " << scale_C.at(coord_0);
}
if constexpr (IsPerRowScaleEnabled) {
file << "\n\nvalpha = \n" << alpha.host_view();
file << "\n\nvbeta = \n" << beta.host_view();
} else {
file
<< ", alpha: " << alpha.at(coord_0) << ", beta: " << beta.at(coord_0);
}
file << "\n\n";
if constexpr (IsAbsMaxEnabledD) {
file << "scale_d: " << float(scale_D.at(coord_0));
file << "\nReference abs_max_D :";
file << " " << float(reference_abs_max_D.at(coord_0));
file << "\nComputed abs_max_D :";
file << " " << float(abs_max_D.at(coord_0));
file << "\n\n";
}
if constexpr (IsAbsMaxEnabledAux) {
file << "scale_aux: " << float(scale_Aux.at(coord_0));
file << "\nReference abs_max_Aux :";
file << " " << float(reference_abs_max_Aux.at(coord_0));
file << "\nComputed abs_max_Aux :";
file << " " << float(abs_max_Aux.at(coord_0));
file << "\n\n";
}
if constexpr (IsBiasEnabled) {
file << "\n\nBias = \n" << bias.host_view();
}
if constexpr (IsAuxInEnabled) {
file << "\n\nAux Input = \n" << tensor_Aux.host_view();
}
if constexpr (IsDeBiasEnabled) {
file << "\n\nReference dBias = \n" << reference_dbias.host_view();
file << "\n\nComputed dBias = \n" << bias.host_view();
}
if constexpr (IsAuxOutEnabled) {
file
<< "\n\nReference Aux =\n" << reference_Aux.host_view()
<< "\n\nComputed Aux =\n" << tensor_Aux.host_view();
}
file
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\n\nComputed =\n" << tensor_D.host_view();
}
Arguments to_args(ProblemShapeType problem_size) {
auto coord_0 = cutlass::make_Coord(0);
Arguments arguments =
{
{},
tensor_C.device_data(), stride_c, tensor_D.device_data(), stride_d
};
auto &fusion_args = arguments.thread;
if constexpr (IsLegacy) {
arguments.thread = {
alpha.at(coord_0),
beta.at(coord_0),
alpha.device_data(),
beta.device_data()
};
arguments.ptr_Bias = bias.device_data();
arguments.ptr_T = tensor_Aux.device_data();
}
else {
fusion_args.alpha = alpha.at(coord_0);
fusion_args.beta = beta.at(coord_0);
fusion_args.alpha_ptr = alpha.device_data();
      fusion_args.beta_ptr = beta.device_data(); // nullptr when beta is kept host-only (e.g. vector beta disabled)
if constexpr (IsScaleFactorEnabled) {
fusion_args.scale_a = scale_A.at(coord_0);
fusion_args.scale_b = scale_B.at(coord_0);
fusion_args.scale_c = scale_C.at(coord_0);
fusion_args.scale_d = scale_D.at(coord_0);
fusion_args.scale_a_ptr = scale_A.device_data();
fusion_args.scale_b_ptr = scale_B.device_data();
fusion_args.scale_c_ptr = scale_C.device_data();
fusion_args.scale_d_ptr = scale_D.device_data();
}
if constexpr (IsBiasEnabled) {
fusion_args.bias_ptr = bias.device_data();
}
if constexpr (IsDeBiasEnabled) {
fusion_args.dbias_ptr = bias.device_data();
}
// example of how to set kernel activation arguments
// see ActivationFunctor::Arguments in activation.h for definition
// if Arguments doesn't exist then fusion_args.activation is empty
if constexpr (cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::ScaledGELU_taylor<ElementCompute>>) {
fusion_args.activation.scale = ElementCompute(1);
}
// Treat Clamp as ReLU
if constexpr (cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::Clamp<ElementCompute>>) {
fusion_args.activation.lower_bound = 0;
fusion_args.activation.upper_bound = std::numeric_limits<ElementCompute>::max();
}
if constexpr (IsAbsMaxEnabledD) {
fusion_args.amax_D_ptr = abs_max_D.device_data();
}
if constexpr (IsAuxInEnabled) {
fusion_args.aux_ptr = tensor_Aux.device_data();
fusion_args.dAux = stride_Aux;
}
if constexpr (IsAuxOutEnabled) {
fusion_args.aux_ptr = tensor_Aux.device_data();
fusion_args.dAux = stride_Aux;
if constexpr (IsScaleFactorEnabled) {
fusion_args.scale_aux = scale_Aux.at(coord_0);
fusion_args.scale_aux_ptr = scale_Aux.device_data();
}
if constexpr (IsAbsMaxEnabledAux) {
fusion_args.amax_aux_ptr = abs_max_Aux.device_data();
}
}
}
return arguments;
}
auto to_host_args(ProblemShapeType problem_size) {
using namespace cute;
//
// Allocate the GEMM workspace
//
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::get<0>(problem_shape_MNKL);
auto N = cute::get<1>(problem_shape_MNKL);
auto K = cute::get<2>(problem_shape_MNKL);
auto L = cute::get<3>(problem_shape_MNKL);
auto coord_0 = cutlass::make_Coord(0);
auto C = cute::make_tensor(detail::make_iterator(tensor_C.host_data()),
cute::make_layout(cute::make_shape(M, N, L), stride_c));
auto D = cute::make_tensor(detail::make_iterator(reference_D.host_data()),
cute::make_layout(cute::make_shape(M, N, L), stride_d));
auto Bias = cute::make_tensor(detail::make_iterator(IsDeBiasEnabled ? reference_dbias.host_data() : bias.host_data()),
cute::make_layout(cute::make_shape(M, cute::_1{})));
auto Aux = cute::make_tensor(detail::make_iterator(IsAuxInEnabled ? tensor_Aux.host_data() : reference_Aux.host_data()),
cute::make_layout(cute::make_shape(M, N, L), stride_Aux));
auto Valpha = cute::make_tensor(detail::make_iterator(alpha.host_data()),
cute::make_layout(cute::make_shape(M, cute::_1{})));
auto Vbeta = cute::make_tensor(detail::make_iterator(beta.host_data()),
cute::make_layout(cute::make_shape(M, cute::_1{})));
cutlass::reference::host::GettEpilogueParams<
ElementScalar,
ElementScalar,
ElementAccumulator,
ElementCompute,
decltype(C),
decltype(D),
decltype(Bias),
decltype(Aux),
decltype(Valpha),
decltype(Vbeta),
ActivationFunctor
> epilogue_params{};
epilogue_params.C = C;
epilogue_params.D = D;
epilogue_params.alpha = alpha.at(coord_0);
epilogue_params.beta = beta.at(coord_0);
if constexpr (IsScaleFactorEnabled) {
epilogue_params.scale_a = scale_A.at(coord_0);
epilogue_params.scale_b = scale_B.at(coord_0);
epilogue_params.scale_c = scale_C.at(coord_0);
epilogue_params.scale_d = scale_D.at(coord_0);
}
if constexpr (IsBiasEnabled or IsDeBiasEnabled) {
epilogue_params.Bias = Bias;
}
if constexpr (IsAbsMaxEnabledD) {
epilogue_params.abs_max_D = reference_abs_max_D.host_data();
}
if constexpr (IsAuxInEnabled) {
epilogue_params.Aux = Aux;
}
if constexpr (IsAuxOutEnabled) {
epilogue_params.Aux = Aux;
if constexpr (IsScaleFactorEnabled) {
epilogue_params.scale_aux = scale_Aux.at(coord_0);
}
if constexpr (IsAbsMaxEnabledAux) {
epilogue_params.abs_max_Aux = reference_abs_max_Aux.host_data();
}
}
if constexpr (IsPerRowScaleEnabled) {
epilogue_params.Valpha = Valpha;
if (disable_vector_beta == VectorBeta::ENABLED) {
epilogue_params.Vbeta = Vbeta;
}
}
return epilogue_params;
}
};
template <
typename Gemm,
template <class T> class ActivationFunctor_ = cutlass::epilogue::thread::Identity,
bool force_legacy_epilogue = false,
typename ElementA = typename Gemm::GemmKernel::ElementA,
typename ElementB = typename Gemm::GemmKernel::ElementB
>
struct TestbedImpl {
// Kernel data types
using ScheduleType = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy::Schedule;
// All Collective MMA operands are defined by HostCollectiveMainloopType based on the schedule type
using HostCollectiveMainloopType = HostCollectiveMainloop<ScheduleType, Gemm, ElementA, ElementB>;
using CollectiveEpilogue = cute::conditional_t<IsDefaultEpilogue<typename Gemm::GemmKernel::CollectiveEpilogue>::value || force_legacy_epilogue,
HostCollectiveDefaultEpilogue<Gemm>,
HostCollectiveEpilogue<Gemm>>;
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
using ElementAccumulator = typename Gemm::GemmKernel::ElementAccumulator;
using ElementCompute = typename ElementComputeType<Gemm, ElementAccumulator>::Type;
using ElementScalar = typename ElementScalarType<Gemm, ElementCompute>::Type;
using LayoutTagA = typename HostCollectiveMainloopType::LayoutTagA;
using LayoutTagB = typename HostCollectiveMainloopType::LayoutTagB;
using LayoutTagC = typename CollectiveEpilogue::LayoutTagC;
using LayoutTagD = typename CollectiveEpilogue::LayoutTagD;
uint32_t sm_count;
// Used to force multi-wave tests for persistent kernel schedules
constexpr static int MaxSmCount = 16;
static constexpr uint64_t kDefaultSeed = 4096;
static constexpr uint32_t mma_promotion_interval = 4;
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
HostCollectiveMainloopType collective_mma_inputs;
CollectiveEpilogue collective_epilogue;
//
// Methods
//
TestbedImpl(
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
ScalarLoc use_device_scalars_ = ScalarLoc::ON_HOST,
VectorBeta disable_vector_beta_ = VectorBeta::DISABLED,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed
): collective_mma_inputs(HostCollectiveMainloopType(check_relative_equality_, init_A_, init_B_, seed_)),
collective_epilogue(CollectiveEpilogue(check_relative_equality_, use_device_scalars_, disable_vector_beta_, init_C_, init_scale_, init_bias_, seed_)) { }
TestbedImpl(
typename LayoutTagA::Stride stride_factor_A_,
typename LayoutTagB::Stride stride_factor_B_,
typename LayoutTagC::Stride stride_factor_C_,
typename LayoutTagD::Stride stride_factor_D_,
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
ScalarLoc use_device_scalars_ = ScalarLoc::ON_HOST,
VectorBeta disable_vector_beta_ = VectorBeta::DISABLED,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed
): collective_mma_inputs(HostCollectiveMainloopType(check_relative_equality_, stride_factor_A_, stride_factor_B_, init_A_, init_B_, seed_)),
collective_epilogue(CollectiveEpilogue(check_relative_equality_, use_device_scalars_, disable_vector_beta_, init_C_, init_scale_, init_bias_, seed_)) { }
/// Initializes data structures
bool initialize(ProblemShapeType problem_size, ElementScalar alpha_=1.f, ElementScalar beta_=0.f) {
collective_mma_inputs.initialize(problem_size);
collective_epilogue.initialize(problem_size, alpha_, beta_);
return true;
}
  /// Compares the device result with the host-computed reference and dumps the tensors to a file if incorrect
bool compare_reference(
cute::Shape<int,int,int,int> problem_shape_MNKL,
ElementScalar alpha,
ElementScalar beta)
{
auto [M, N, K, L] = problem_shape_MNKL;
bool passed = collective_mma_inputs.compare_reference(problem_shape_MNKL);
passed &= collective_epilogue.compare_reference(problem_shape_MNKL, alpha, beta);
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< M << "x" << N << "x" << K << "x" << L << "_"
<< cute::get<0>(typename Gemm::GemmKernel::TileShape{}) << "_"
<< cute::get<1>(typename Gemm::GemmKernel::TileShape{}) << "_"
<< cute::get<2>(typename Gemm::GemmKernel::TileShape{}) << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << ' ' << M << "x" << N << "x" << K << ", Batch count = " << L
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
collective_mma_inputs.print_tensors(file);
collective_epilogue.print_tensors(file);
}
return passed;
}
  /// Verifies the device result against the host reference GEMM
bool verify(
ProblemShapeType problem_size,
ElementScalar alpha,
ElementScalar beta)
{
using namespace cute;
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto mainloop_params = collective_mma_inputs.to_host_args(problem_size);
auto epilogue_params = collective_epilogue.to_host_args(problem_size);
cutlass::reference::host::Gemm3x(mainloop_params, epilogue_params);
bool passed = compare_reference(problem_shape_MNKL, alpha, beta);
return passed;
}
/// Determine if the CUDA device is sufficient to run the kernel
bool sufficient() {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = static_cast<size_t>(Gemm::GemmKernel::SharedStorageSize);
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
cudaDeviceProp properties;
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
this->sm_count = properties.multiProcessorCount;
if (properties.sharedMemPerBlockOptin < smem_size) {
printf("failed due to smem_size\n");
printf("hardware smem_size: %d, required smem_size: %d\n\n", int(properties.sharedMemPerBlockOptin), int(smem_size));
return false;
}
return true;
}
bool profile(
ProblemShapeType problem_size,
int iterations,
Gemm& gemm_op,
typename Gemm::Arguments& arguments,
cutlass::device_memory::allocation<uint8_t>& workspace) {
int M = cute::size<0>(problem_size);
int N = cute::size<1>(problem_size);
int K = cute::size<2>(problem_size);
int L = 1;
if constexpr(cute::rank(ProblemShapeType{}) == 4) {
L = cute::size<3>(problem_size);
}
cutlass::Status status;
//
// Run the GEMM
//
cudaError_t result;
for (int iter = 0; iter < iterations; ++iter) {
status = gemm_op(arguments, workspace.get());
if (status != cutlass::Status::kSuccess) {
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
return false;
}
}
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync.";
return false;
}
return true;
}
/// Executes one test
bool run(
ProblemShapeType problem_size,
ElementScalar alpha = ElementScalar(1),
ElementScalar beta = ElementScalar(0),
bool profiling = false,
detail::Iterations iterations = detail::Iterations{},
RasterOrderOptions raster_order = RasterOrderOptions::Heuristic,
detail::MaxSwizzleSize max_swizzle = detail::MaxSwizzleSize{},
detail::Splits splits = detail::Splits{},
DecompositionMode decomposition_mode = DecompositionMode::Heuristic
)
{
// Fail test if insufficient CUDA device
if (!sufficient()) {
std::cout << "Test failed due to insufficient CUDA device." << std::endl;
return false;
}
if (!this->initialize(problem_size, alpha, beta)) {
std::cerr << "Initialization failed \n";
return false;
}
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments;
cutlass::KernelHardwareInfo hw_info;
hw_info.device_id = 0;
if (not profiling) {
this->sm_count = std::min(MaxSmCount, cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id));
hw_info.sm_count = this->sm_count;
}
else {
this->sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
hw_info.sm_count = this->sm_count;
}
typename Gemm::GemmKernel::TileScheduler::Arguments scheduler_args;
if constexpr (cute::is_same_v<typename Gemm::GemmKernel::TileSchedulerTag, cutlass::gemm::StreamKScheduler>) {
scheduler_args = { static_cast<int>(splits), static_cast<int>(max_swizzle), raster_order, decomposition_mode };
}
else {
scheduler_args = { static_cast<int>(max_swizzle), raster_order };
}
typename HostCollectiveMainloopType::Arguments mainloop_args;
mainloop_args = collective_mma_inputs.to_args();
arguments =
{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size,
mainloop_args,
collective_epilogue.to_args(problem_size),
hw_info,
scheduler_args
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.can_implement(arguments);
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
//
// Run the GEMM
//
if (profiling) {
return profile(problem_size, static_cast<int>(iterations), gemm_op, arguments, workspace);
}
else {
cudaError_t result;
status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
status = gemm_op.run();
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync.";
return false;
}
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Error : Failed : with alpha: " << alpha << ", beta: " << beta
<< "\n";
}
return passed;
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Gemm,
template <class T> class ActivationFunctor = cutlass::epilogue::thread::Identity,
bool force_legacy_epilogue = false,
typename ElementA = typename Gemm::GemmKernel::ElementA,
typename ElementB = typename Gemm::GemmKernel::ElementB
>
struct Testbed3x {
using TestBedImpl = typename detail::TestbedImpl<
Gemm,
ActivationFunctor,
force_legacy_epilogue,
ElementA,
ElementB
>;
using Kernel = typename Gemm::GemmKernel;
using Epilogue = typename Gemm::GemmKernel::CollectiveEpilogue;
using ElementAccumulator = typename TestBedImpl::ElementAccumulator;
using ElementCompute = typename TestBedImpl::ElementCompute;
using ElementScalar = typename TestBedImpl::ElementScalar;
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
// Detail Implementation
TestBedImpl impl_;
//
// Methods
//
Testbed3x(
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
ScalarLoc use_device_scalars_ = ScalarLoc::ON_DEVICE,
VectorBeta disable_vector_beta_ = VectorBeta::DISABLED,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = TestBedImpl::kDefaultSeed)
: impl_(check_relative_equality_, use_device_scalars_, disable_vector_beta_, init_A_, init_B_, init_C_, init_scale_, init_bias_, seed_) {}
/// Executes one test
bool run(
typename TestBedImpl::ProblemShapeType problem_size,
ElementScalar alpha = ElementScalar(1),
ElementScalar beta = ElementScalar(0),
RasterOrderOptions raster_order = RasterOrderOptions::Heuristic,
detail::MaxSwizzleSize max_swizzle = detail::MaxSwizzleSize{},
detail::Splits splits = detail::Splits{},
DecompositionMode decomposition_mode = DecompositionMode::Heuristic,
bool profiling = false,
detail::Iterations iterations = detail::Iterations{}
)
{
return impl_.run(
problem_size, alpha, beta, profiling, iterations, raster_order, max_swizzle, splits, decomposition_mode
);
}
};
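// Usage sketch (illustrative only; `ExampleGemmKernel` is a placeholder for a fully specified
// CUTLASS 3.x kernel type, not something defined in this header). A test typically constructs
// the testbed and runs a single problem size; the scheduler-related arguments default to
// heuristic choices:
//
//   using Gemm = cutlass::gemm::device::GemmUniversalAdapter<ExampleGemmKernel>;
//   Testbed3x<Gemm> testbed;
//   bool passed = testbed.run(
//     typename Gemm::GemmKernel::ProblemShape{256, 256, 128, /* l */ 1},
//     /* alpha */ 1.0f,
//     /* beta  */ 0.5f);
//   EXPECT_TRUE(passed);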
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestGemmPerf3x(int iterations = 20) {
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
using ElementAccumulator = typename Gemm::GemmKernel::ElementAccumulator;
using ElementScalar = ElementAccumulator;
bool passed = true;
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
std::vector<int> problem_size_m = { 4608 };
std::vector<int> problem_size_n = { 4608 };
std::vector<int> problem_size_k = { 8192 };
Testbed3x<Gemm> testbed;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
ProblemShapeType problem_size;
if constexpr (cute::rank(ProblemShapeType{}) == 4) {
problem_size = ProblemShapeType{m, n, k, /* l */ 1};
}
else {
problem_size = ProblemShapeType{m, n, k};
}
passed = testbed.run(
problem_size,
cutlass::from_real<ElementScalar>(1),
cutlass::from_real<ElementScalar>(0),
RasterOrderOptions{}, detail::MaxSwizzleSize(1), detail::Splits{1}, DecompositionMode{},
true, // profiling
detail::Iterations{iterations});
if (!passed) {
return false;
}
}
}
}
return true;
}
template <
typename Gemm,
template <class T> class ActivationFunctor = cutlass::epilogue::thread::Identity
>
bool TestAll(double alpha = 1.0, double beta = 0.0, CheckEquality check_relative_equality = CheckEquality::RELATIVE) {
using ElementScalar = typename Gemm::EpilogueOutputOp::ElementScalar;
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
Testbed3x<Gemm, ActivationFunctor> testbed(check_relative_equality, ScalarLoc::ON_HOST, VectorBeta::DISABLED);
int max_alignment = std::max(Gemm::kAlignmentA, Gemm::kAlignmentB);
std::vector<int> problem_size_m = {max_alignment, 512 - 3 * max_alignment};
std::vector<int> problem_size_n = {max_alignment, 512 - 2 * max_alignment};
if constexpr (cute::is_same_v<typename Gemm::GemmKernel::DispatchPolicy::Schedule,
cutlass::gemm::KernelTmaWarpSpecializedPingpong>) {
problem_size_m.push_back(768);
problem_size_n.push_back(768);
}
constexpr int Stages = Gemm::GemmKernel::DispatchPolicy::Stages;
constexpr int TileShapeK = cute::size<2>(typename Gemm::GemmKernel::TileShape{});
std::vector<int> problem_size_k = {max_alignment, TileShapeK * (Stages + 1) - max_alignment};
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
std::vector<DecompositionMode> decomposition_modes = {DecompositionMode::Heuristic};
std::vector problem_splits = {detail::Splits{1}};
static constexpr bool UsesStreamKScheduler = cute::is_same_v<typename Gemm::GemmKernel::TileSchedulerTag, cutlass::gemm::StreamKScheduler>;
if constexpr (UsesStreamKScheduler) {
problem_splits.push_back(detail::Splits{2});
problem_splits.push_back(detail::Splits{3});
decomposition_modes.push_back(DecompositionMode::DataParallel);
decomposition_modes.push_back(DecompositionMode::SplitK);
decomposition_modes.push_back(DecompositionMode::StreamK);
// Use larger K sizes for stream-K tests
static constexpr int min_tiles_per_sk_unit = cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::min_iters_per_sk_unit_;
problem_size_k = {TileShapeK * min_tiles_per_sk_unit, TileShapeK * 3 * min_tiles_per_sk_unit - max_alignment};
}
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
std::vector<RasterOrderOptions> raster_orders = {RasterOrderOptions::AlongM, RasterOrderOptions::AlongN};
std::vector max_swizzle_sizes{detail::MaxSwizzleSize{1}, detail::MaxSwizzleSize{4}};
bool passed = true;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (auto raster_order : raster_orders) {
for (auto max_swizzle_size : max_swizzle_sizes) {
for (DecompositionMode decomp_mode : decomposition_modes) {
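            // Note: this inner list shadows the outer problem_splits declared above; the split
            // factors actually exercised are rebuilt here for each decomposition mode.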
std::vector problem_splits = {detail::Splits{1}};
if (decomp_mode == DecompositionMode::Heuristic || decomp_mode == DecompositionMode::SplitK) {
auto max_splits = (k + TileShapeK - 1) / TileShapeK;
if (max_splits > 2) {
problem_splits.push_back(detail::Splits{2});
}
if (max_splits > 3) {
problem_splits.push_back(detail::Splits{3});
}
problem_splits.push_back(detail::Splits{max_splits});
// Test the case in which we ask for more splits than there are K tiles in the GEMM. In this
// case, split-K will fall back to a splitting factor of `max_splits`.
problem_splits.push_back(detail::Splits{max_splits + 1});
}
for (auto splits : problem_splits) {
ProblemShapeType problem_size;
if constexpr (cute::rank(ProblemShapeType{}) == 4) {
problem_size = ProblemShapeType{m, n, k, /* l */ 1};
}
else {
problem_size = ProblemShapeType{m, n, k};
}
passed = testbed.run(
problem_size,
cutlass::from_real<ElementScalar>(alpha),
cutlass::from_real<ElementScalar>(beta),
raster_order,
max_swizzle_size,
splits,
decomp_mode
);
if (!passed) {
std::cout << __FILE__ << ':' << __LINE__ << " : GEMM MNK " << m << " " << n << " " << k << " FAILED.\n";
return false;
}
} // splits
} // decomposition_mode
} // max_swizzle_size
} // raster_order
} // k
} // n
} // m
// If batched GEMM is supported (rank-4 problem shape), run a single batched test to save test time.
if constexpr (cute::rank(ProblemShapeType{}) == 4) {
auto problem_size = ProblemShapeType{256 + max_alignment, 256 + max_alignment, 160 + max_alignment, /* l */ 3};
passed = testbed.run(
problem_size,
cutlass::from_real<ElementScalar>(alpha),
cutlass::from_real<ElementScalar>(beta)
);
if (!passed) {
return false;
}
}
return passed;
}
template <typename Gemm>
bool TestAllBiasElementwise(double alpha = 1.0, double beta = 0.0, CheckEquality check_relative_equality = CheckEquality::EXACT) {
return TestAll<Gemm>(alpha, beta, check_relative_equality);
}
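// Usage sketch (illustrative only; the kernel alias below is hypothetical). Device-level unit
// tests typically wrap TestAll in a GTest case and may override alpha/beta or the
// equality-check mode:
//
//   TEST(SM90_Device_Gemm_example, 128x128x64) {
//     using Gemm = cutlass::gemm::device::GemmUniversalAdapter<ExampleGemmKernel>;
//     EXPECT_TRUE(TestAll<Gemm>());
//     EXPECT_TRUE(TestAll<Gemm>(1.0, 0.5, CheckEquality::RELATIVE));
//   }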
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/test/unit/gemm/device/gemm_testbed_3x.hpp/0
|
{
"file_path": "cutlass/test/unit/gemm/device/gemm_testbed_3x.hpp",
"repo_id": "cutlass",
"token_count": 26680
}
| 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, typename BinaryOp>
struct GemmWithReductionReference {
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::ElementCompute;
using ElementC = typename Gemm::ElementC;
using ElementT = typename Gemm::GemmKernel::Epilogue::ElementTensor;
//
// Data members
//
BinaryOp binary_op;
//
// Methods
//
GemmWithReductionReference() { }
ElementCompute operator()(
ElementAccumulator d_y,
ElementT t) {
return binary_op(ElementCompute(d_y), ElementCompute(t));
}
};
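// Illustrative sketch (assumption: BinaryOp = cutlass::plus<ElementCompute>): the functor then
// models an epilogue that adds the elementwise tensor T to the GEMM output before the per-row
// reduction computed by the testbed below, i.e.
//
//   GemmWithReductionReference<Gemm, cutlass::plus<float>> ref;
//   float d = ref(/* d_y */ 2.0f, /* t */ 0.5f);  // yields 2.5f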
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Gemm,
typename ReferenceOp
>
struct TestbedGemmWithReduction {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementT = typename Gemm::GemmKernel::Epilogue::ElementTensor;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A;
cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_C;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_D;
cutlass::HostTensor<typename Gemm::ElementAccumulator, typename Gemm::LayoutC> tensor_Reduction;
cutlass::HostTensor<ElementT, typename Gemm::LayoutC> tensor_Tensor;
cutlass::HostTensor<ElementAccumulator, typename Gemm::LayoutC> tensor_C_ref;
cutlass::HostTensor<ElementAccumulator, typename Gemm::LayoutC> reference_d_Y;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> reference_D;
cutlass::HostTensor<typename Gemm::ElementAccumulator, typename Gemm::LayoutC> reference_Reduction;
//
// Methods
//
TestbedGemmWithReduction(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
for (int m = 0; m < view.extent().row(); ++m) {
for (int n = 0; n < view.extent().column(); ++n) {
//view.at({m, n}) = Element(float(((idx ++) % 17) - 8));
view.at({m, n}) = (n == 0 ? Element(m) : Element());
}
}
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(problem_size.mk());
tensor_B.resize(problem_size.kn());
tensor_C.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
tensor_Reduction.resize({
problem_size.m(),
(problem_size.n() - 1 + Gemm::ThreadblockShape::kN) / Gemm::ThreadblockShape::kN
});
tensor_Tensor.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
reference_d_Y.resize(problem_size.mn(), false);
tensor_C_ref.resize(problem_size.mn(), false);
reference_Reduction.resize({problem_size.m(), 1}, false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
EXPECT_TRUE(initialize_tensor(tensor_Tensor.host_view(), init_C, seed + 2020));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1);
tensor_C.host_view().at({0, 0}) = typename Gemm::ElementC(1);
for (int m = 0; m < tensor_C_ref.extent().row(); ++m) {
for (int n = 0; n < tensor_C_ref.extent().column(); ++n) {
tensor_C_ref.at({m, n}) = ElementAccumulator(tensor_C.at({m, n}));
}
}
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
tensor_Reduction.sync_device();
tensor_Tensor.sync_device();
}
/// Compares the host-computed reference with the device result and writes diagnostics to a file on mismatch
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementAccumulator alpha,
ElementAccumulator beta) {
tensor_Reduction.sync_host();
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Reduction.host_view()), 0);
bool passed = true;
for (int m = 0; m < tensor_Reduction.extent().row(); ++m) {
ElementAccumulator reduced_value = ElementAccumulator();
for (int j = 0; j < tensor_Reduction.extent().column(); ++j) {
reduced_value += tensor_Reduction.at({m, j});
}
if (reduced_value != reference_Reduction.at({m, 0})) {
std::cout << "Error in bias[" << m << "] - Expected: " << reference_Reduction.at({m, 0}) << ", got: " << reduced_value << std::endl;
passed = false;
break;
}
}
EXPECT_TRUE(passed) << "Reduction is incorect.";
if (!cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view())) {
EXPECT_TRUE(false) << " mismatched reference";
passed = false;
}
if (!passed) {
/*
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
*/
std::ofstream file("testbed_universal_errors_sm70.txt");
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\nT = \n" << tensor_Tensor.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view()
<< "\n\nReduction =\n" << tensor_Reduction.host_view() << "\n"
<< "\nReference reduction =\n" << reference_Reduction.host_view() << "\n";
}
return passed;
}
/// Verifies the device result against a host reference GEMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementAccumulator alpha,
ElementAccumulator beta) {
//
// Verify
//
cutlass::reference::host::GemmComplex<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
ElementAccumulator, typename Gemm::LayoutC,
ElementAccumulator, ElementAccumulator
>(
problem_size,
alpha,
tensor_A.host_ref(),
Gemm::kTransformA,
tensor_B.host_ref(),
Gemm::kTransformB,
beta,
tensor_C_ref.host_ref(),
reference_d_Y.host_ref(),
ElementAccumulator(0)
);
using ElementC = typename Gemm::ElementC;
ReferenceOp reference_op;
// Compute the reference epilogue: combine d_Y with the elementwise tensor T, and reduce each
// row to form the reference reduction output.
for (int m = 0; m < problem_size.m(); ++m) {
ElementAccumulator reduced_value = ElementAccumulator();
for (int n = 0; n < problem_size.n(); ++n) {
ElementAccumulator d_full = reference_op(reference_d_Y.at({m, n}), tensor_Tensor.at({m, n}));
reduced_value += d_full;
reference_D.at({m, n}) = ElementC(d_full);
}
reference_Reduction.at({m, 0}) = reduced_value;
}
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord problem_size,
int batch_count = 1,
ElementAccumulator alpha = ElementAccumulator(1),
ElementAccumulator beta = ElementAccumulator(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha, beta},
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_D.device_data(),
tensor_Reduction.device_data(),
tensor_Tensor.device_data(),
problem_size.m() * problem_size.k(),
problem_size.n() * problem_size.k(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
problem_size.m(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_C.layout().stride(0),
tensor_D.layout().stride(0),
tensor_Reduction.layout().stride(0),
tensor_Tensor.layout().stride(0),
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Failed with batch_count/split_k_slices = " << batch_count << std::endl;
}
//
// Profile
//
#if 0 // profiling disabled for now.
int const kWorkspaces = 100;
cutlass::DeviceAllocation<typename Gemm::ElementA> profiling_tensor_A(tensor_A.capacity() * kWorkspaces);
cutlass::DeviceAllocation<typename Gemm::ElementB> profiling_tensor_B(tensor_B.capacity() * kWorkspaces);
cutlass::DeviceAllocation<typename Gemm::ElementC> profiling_tensor_C(tensor_C.capacity() * kWorkspaces);
cutlass::DeviceAllocation<typename Gemm::ElementC> profiling_tensor_D(tensor_D.capacity() * kWorkspaces);
cutlass::DeviceAllocation<typename Gemm::ElementC> profiling_tensor_Reduction(tensor_Reduction.capacity() * kWorkspaces);
cutlass::DeviceAllocation<ElementT> profiling_tensor_Tensor(tensor_Tensor.capacity() * kWorkspaces);
cudaEvent_t events[2];
for (auto & event : events) {
cudaError_t result = cudaEventCreate(&event);
if (result != cudaSuccess) {
EXPECT_EQ(result, cudaSuccess) << " cudaEventCreate() failed with error " << cudaGetErrorString(result);
return false;
break;
}
}
int const kWarmupIterations = 5;
int const kProfilingIterations = 100;
for (int i = 0; i < kWarmupIterations; ++i) {
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
}
cudaError_t result = cudaEventRecord(events[0]);
EXPECT_EQ(result, cudaSuccess);
for (int i = 0; i < kProfilingIterations; ++i) {
typename Gemm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha, beta},
profiling_tensor_A.get() + tensor_A.capacity() * (i % kWorkspaces),
profiling_tensor_B.get() + tensor_B.capacity() * (i % kWorkspaces),
profiling_tensor_C.get() + tensor_C.capacity() * (i % kWorkspaces),
profiling_tensor_D.get() + tensor_D.capacity() * (i % kWorkspaces),
profiling_tensor_Reduction.get() + tensor_Reduction.capacity() * (i % kWorkspaces),
profiling_tensor_Tensor.get() + tensor_Tensor.capacity() * (i % kWorkspaces),
problem_size.m() * problem_size.k(),
problem_size.n() * problem_size.k(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
problem_size.m(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_C.layout().stride(0),
tensor_D.layout().stride(0),
tensor_Reduction.layout().stride(0),
tensor_Tensor.layout().stride(0),
};
gemm_op.initialize(arguments, workspace.get());
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
}
result = cudaEventRecord(events[1]);
EXPECT_EQ(result, cudaSuccess);
result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess);
float elapsed_time = 0;
result = cudaEventElapsedTime(&elapsed_time, events[0], events[1]);
EXPECT_EQ(result, cudaSuccess);
double average_time = double(elapsed_time) / double(kProfilingIterations);
std::cout << problem_size << ": " << average_time << " ms" << std::endl;
for (auto & event : events) {
cudaEventDestroy(event);
}
#endif
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, typename ReferenceOp>
bool TestGemmWithReduction(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmUniversalMode mode,
int batch_count = 1,
double alpha = 1.0,
double beta = 2.0) {
bool passed = true;
TestbedGemmWithReduction<Gemm, ReferenceOp> testbed;
using ElementAccumulator = typename Gemm::ElementAccumulator;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementAccumulator>(alpha),
cutlass::from_real<ElementAccumulator>(beta)
);
return passed;
}
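// Usage sketch (illustrative only; `Gemm` stands in for a device-level GEMM-with-reduction
// kernel exposing Epilogue::ElementTensor). The reference functor is chosen to match the
// kernel's elementwise epilogue:
//
//   using Reference = GemmWithReductionReference<Gemm, cutlass::plus<float>>;
//   bool passed = TestGemmWithReduction<Gemm, Reference>(
//     {520, 264, 96},
//     cutlass::gemm::GemmUniversalMode::kGemm,
//     /* batch_count */ 1,
//     /* alpha */ 1.0,
//     /* beta  */ 2.0);
//   EXPECT_TRUE(passed);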
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/test/unit/gemm/device/testbed_gemm_with_reduction.h/0
|
{
"file_path": "cutlass/test/unit/gemm/device/testbed_gemm_with_reduction.h",
"repo_id": "cutlass",
"token_count": 7541
}
| 59 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Testbed for running device-level GEMMs with absolute maximum calculation and scaling
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "testbed.h"
#include "testbed_sparse.h"
#include "testbed_utils.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Gemm,
typename GemmTestbed,
template<typename T> class ActivationFunctor
>
struct TestbedWithAmax {
static_assert(std::is_same_v<GemmTestbed, Testbed<Gemm>> || std::is_same_v<GemmTestbed, SparseTestbed<Gemm>>);
static constexpr bool IsSparseTestbed = std::is_same_v<GemmTestbed, SparseTestbed<Gemm>>;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
using ElementScalingFactor = typename Gemm::EpilogueOutputOp::ElementScalingFactor;
using ElementAbsmax = typename Gemm::EpilogueOutputOp::ElementAbsmax;
static bool const kScaleAux = Gemm::EpilogueOutputOp::kIsScalingAndAmaxAuxOutputNeeded;
static bool const kScaleOutput = Gemm::EpilogueOutputOp::kIsScalingAndAmaxOutputNeeded;
bool doScaleA;
bool doScaleB;
bool doScaleC;
GemmTestbed underlying_testbed;
cutlass::HostTensor<typename Gemm::EpilogueOutputOp::ElementAuxOutput, typename Gemm::LayoutC> tensor_Aux;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_Vector;
cutlass::HostTensor<ElementAccumulator, typename Gemm::LayoutC> tmp_D;
cutlass::HostTensor<typename Gemm::EpilogueOutputOp::ElementOutput, typename Gemm::LayoutC> reference_D;
cutlass::HostTensor<typename Gemm::EpilogueOutputOp::ElementAuxOutput, typename Gemm::LayoutC> reference_Aux;
cutlass::HostTensor<ElementScalingFactor, typename Gemm::LayoutC> scale_A;
cutlass::HostTensor<ElementScalingFactor, typename Gemm::LayoutC> scale_B;
cutlass::HostTensor<ElementScalingFactor, typename Gemm::LayoutC> scale_C;
cutlass::HostTensor<ElementScalingFactor, typename Gemm::LayoutC> scale_D;
cutlass::HostTensor<ElementScalingFactor, typename Gemm::LayoutC> scale_Aux;
cutlass::HostTensor<ElementAbsmax, typename Gemm::LayoutC> abs_max_Aux;
cutlass::HostTensor<ElementAbsmax, typename Gemm::LayoutC> abs_max_D;
cutlass::HostTensor<ElementAbsmax, typename Gemm::LayoutC> reference_abs_max_Aux;
cutlass::HostTensor<ElementAbsmax, typename Gemm::LayoutC> reference_abs_max_D;
//
// Methods
//
TestbedWithAmax(
bool scaleA = true,
bool scaleB = true,
bool scaleC = true,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform
):
doScaleA(scaleA), doScaleB(scaleB), doScaleC(scaleC),
underlying_testbed(init_A_, init_B_, init_C_) { }
/// Helper to initialize scaling factors
template <typename Element, typename Layout>
bool initialize_scale_factor(cutlass::TensorView<Element, Layout> view, uint64_t seed, int bits=0) {
cutlass::reference::host::TensorFillRandomUniform(view, seed, double(1.), double(0.), bits);
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
underlying_testbed.initialize(problem_size);
tensor_Vector.resize({1, problem_size.n()});
reference_D.resize(problem_size.mn(), false);
tmp_D.resize(problem_size.mn(), false);
EXPECT_TRUE(
underlying_testbed.initialize_tensor(tensor_Vector.host_view(), underlying_testbed.init_C, underlying_testbed.seed + 2020)
);
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
cutlass::Coord<2> origin(0);
tensor_Vector.host_view().at(origin) = typename Gemm::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), underlying_testbed.tensor_C.host_view());
tensor_Vector.sync_device();
int scale_bits = 2;
if (doScaleA) {
scale_A.resize({1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_A.host_view(), underlying_testbed.seed + 2021, scale_bits));
scale_A.sync_device();
}
if (doScaleB) {
scale_B.resize({1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_B.host_view(), underlying_testbed.seed + 2022, scale_bits));
scale_B.sync_device();
}
if (doScaleC) {
scale_C.resize({1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_C.host_view(), underlying_testbed.seed + 2023, scale_bits));
scale_C.sync_device();
}
if (kScaleOutput) {
scale_D.resize({1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_D.host_view(), underlying_testbed.seed + 2024, scale_bits));
scale_D.sync_device();
abs_max_D.resize({1, 1});
cutlass::reference::host::TensorFill(abs_max_D.host_view());
abs_max_D.sync_device();
reference_abs_max_D.resize({1, 1});
}
if (kScaleAux) {
tensor_Aux.resize(problem_size.mn());
cutlass::reference::host::TensorFill(tensor_Aux.host_view());
tensor_Aux.sync_device();
scale_Aux.resize({1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_Aux.host_view(), underlying_testbed.seed + 2025, scale_bits));
scale_Aux.sync_device();
abs_max_Aux.resize({1, 1});
cutlass::reference::host::TensorFill(abs_max_Aux.host_view());
abs_max_Aux.sync_device();
reference_Aux.resize(problem_size.mn(), false);
reference_abs_max_Aux.resize({1, 1});
}
}
/// Compares the host-computed reference with the device result and writes diagnostics to a file on mismatch
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
underlying_testbed.tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(underlying_testbed.tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(underlying_testbed.tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(underlying_testbed.tensor_C.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(underlying_testbed.tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), underlying_testbed.tensor_D.host_view());
if (kScaleAux) {
tensor_Aux.sync_host();
abs_max_Aux.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Aux.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(abs_max_Aux.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_Aux.host_view()), 0);
passed &= cutlass::reference::host::TensorEquals(reference_Aux.host_view(), tensor_Aux.host_view());
passed &= cutlass::reference::host::TensorEquals(abs_max_Aux.host_view(), reference_abs_max_Aux.host_view());
}
if (kScaleOutput) {
abs_max_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(abs_max_D.host_view()), 0);
passed &= cutlass::reference::host::TensorEquals(abs_max_D.host_view(), reference_abs_max_D.host_view());
}
EXPECT_TRUE(passed) << " mismatched reference";
if (!passed) {
std::ofstream file("testbed_with_amax_errors.txt");
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << underlying_testbed.tensor_A.host_view()
<< "\nB =\n" << underlying_testbed.tensor_B.host_view()
<< "\nC =\n" << underlying_testbed.tensor_C.host_view()
<< "\nVector =\n" << tensor_Vector.host_view()
<< "\nScaleA = " << scale_A.host_view()
<< "\nScaleB = " << scale_B.host_view()
<< "\nScaleC = " << scale_C.host_view()
<< "\nScaleD = " << scale_D.host_view()
<< "\nScaleAux = " << scale_Aux.host_view()
<< "\n\nReference D =\n" << reference_D.host_view()
<< "\nComputed D =\n" << underlying_testbed.tensor_D.host_view();
if (kScaleAux) {
file
<< "\n\nReference Aux =\n" << reference_Aux.host_view()
<< "\nComputed Aux =\n" << tensor_Aux.host_view()
<< "\n\nReference Absmax Aux = " << reference_abs_max_Aux.host_view()
<< "\nComputed Absmax Aux = " << abs_max_Aux.host_view();
}
if (kScaleOutput) {
file
<< "\n\nReference Absmax D = " << reference_abs_max_D.host_view()
<< "\nComputed Absmax D = " << abs_max_D.host_view();
}
}
return passed;
}
/// Verifies the device result against a host reference GEMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
cutlass::Coord<2> origin(0);
ElementCompute scaled_alpha = alpha;
if (doScaleA) {
scaled_alpha *= scale_A.host_view().at(origin);
}
if (doScaleB) {
scaled_alpha *= scale_B.host_view().at(origin);
}
ElementCompute scaled_beta = beta;
if (doScaleC) {
scaled_beta *= scale_C.host_view().at(origin);
}
//
// Verify
//
auto ref_tA = [&](){
if constexpr (IsSparseTestbed) {
cutlass::uncompress(
underlying_testbed.tensor_A_uncompressed.host_ref(),
underlying_testbed.tensor_A.host_ref(),
underlying_testbed.tensor_E.host_ref(),
problem_size.m(),
problem_size.k()
);
return underlying_testbed.tensor_A_uncompressed.host_ref();
}
else {
return underlying_testbed.tensor_A.host_ref();
}
}();
// Run reference kernel with ElementOutput of type ElementAccumulator
// so that we can compute the absmax epilogue on data that is of type
// ElementAccumulator (which is what the GEMM we are testing will do).
cutlass::reference::host::GemmComplex<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC,
ElementCompute, ElementAccumulator, ElementAccumulator
>(
problem_size,
scaled_alpha,
ref_tA,
Gemm::kTransformA,
underlying_testbed.tensor_B.host_ref(),
Gemm::kTransformB,
scaled_beta,
underlying_testbed.tensor_C.host_ref(),
tmp_D.host_ref(),
ElementAccumulator(0)
);
ElementCompute tmp_abs_max_Aux(0.);
ElementCompute tmp_abs_max_D(0.);
cutlass::NumericConverter<ElementCompute, typename Gemm::ElementC> cvt_c_to_compute;
cutlass::NumericConverter<ElementCompute, ElementAccumulator> cvt_accum_to_compute;
cutlass::NumericConverter<ElementAbsmax, ElementCompute> cvt_compute_to_absmax;
cutlass::NumericConverter<typename Gemm::EpilogueOutputOp::ElementOutput, ElementCompute> cvt_compute_to_d;
cutlass::NumericConverter<typename Gemm::EpilogueOutputOp::ElementAuxOutput, ElementCompute> cvt_compute_to_aux;
cutlass::absolute_value_op<ElementCompute> abs;
cutlass::maximum_with_nan_propogation<ElementCompute> max;
ActivationFunctor<ElementCompute> act;
ElementScalingFactor d_scale = kScaleOutput ? scale_D.host_view().at(origin) : ElementScalingFactor(1.);
for (int m = 0; m < problem_size.m(); ++m) {
for (int n = 0; n < problem_size.n(); ++n) {
ElementCompute intermediate = cvt_accum_to_compute(tmp_D.host_view().at({m, n}));
ElementCompute bias = cvt_c_to_compute(tensor_Vector.host_view().at({0, n}));
ElementCompute aux = intermediate + bias;
ElementCompute d = act(aux);
tmp_abs_max_Aux = max(abs(aux), tmp_abs_max_Aux);
tmp_abs_max_D = max(abs(d), tmp_abs_max_D);
reference_D.host_view().at({m, n}) = cvt_compute_to_d(d * d_scale);
if (kScaleAux) {
reference_Aux.host_view().at({m, n}) = cvt_compute_to_aux(aux * scale_Aux.host_view().at(origin));
}
}
}
if (kScaleAux) {
reference_abs_max_Aux.host_view().at(origin) = cvt_compute_to_absmax(tmp_abs_max_Aux);
}
if (kScaleOutput) {
reference_abs_max_D.host_view().at(origin) = cvt_compute_to_absmax(tmp_abs_max_D);
}
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
return underlying_testbed.sufficient();
}
/// Executes one test
bool run(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord problem_size,
int batch_count = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0))
{
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::EpilogueOutputOp::Params::ActivationParams activation_params{alpha, beta};
typename Gemm::EpilogueOutputOp::Params epilogue_params{
activation_params,
scale_A.device_data(),
scale_B.device_data(),
scale_C.device_data(),
scale_D.device_data(),
scale_Aux.device_data(),
abs_max_Aux.device_data(),
abs_max_D.device_data()
};
auto arguments = [&]() {
if constexpr (IsSparseTestbed) {
return typename Gemm::Arguments{
problem_size,
underlying_testbed.tensor_A.device_ref(),
underlying_testbed.tensor_B.device_ref(),
underlying_testbed.tensor_C.device_ref(),
underlying_testbed.tensor_D.device_ref(),
underlying_testbed.tensor_E_reordered.device_ref(),
tensor_Aux.device_ref(),
tensor_Vector.device_data(),
0, // stride vector
epilogue_params
};
}
else {
return typename Gemm::Arguments{
mode,
problem_size,
batch_count,
epilogue_params,
underlying_testbed.tensor_A.device_data(),
underlying_testbed.tensor_B.device_data(),
underlying_testbed.tensor_C.device_data(),
underlying_testbed.tensor_D.device_data(),
tensor_Aux.device_data(),
tensor_Vector.device_data(),
problem_size.m() * problem_size.k(),
problem_size.n() * problem_size.k(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
0, // stride vector
underlying_testbed.tensor_A.layout().stride(0),
underlying_testbed.tensor_B.layout().stride(0),
underlying_testbed.tensor_C.layout().stride(0),
underlying_testbed.tensor_D.layout().stride(0),
(int64_t)0 // Leading dimension of vector. This must be 0
};
}
}();
Gemm gemm_op;
cutlass::Status status = gemm_op.can_implement(arguments);
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
cudaError_t cuda_error = cudaDeviceSynchronize();
EXPECT_TRUE(cuda_error == cudaSuccess) << cudaGetErrorString(cuda_error);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Failed with batch_count/split_k_slices = " << batch_count << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Gemm,
typename GemmTestbed,
template<typename T> class ActivationFunctor = cutlass::epilogue::thread::Identity
>
bool TestAllGemmWithAbsmax(bool scaleA=true, bool scaleB=true, bool scaleC=true) {
int const kMinimumOperandElementSize =
std::min(
int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));
int constexpr kAlignmentM = [&]() {
if constexpr (std::is_same_v<GemmTestbed, SparseTestbed<Gemm>>) {
// The M dimension has to be a multiple of 32 (sparse float) or 16 (sparse int)
// because of the reordering of operand E
return std::max(((sizeof(typename Gemm::ElementE) == 2) ? 32 : 16),
kMinimumOperandElementSize);
}
else {
return 128 / kMinimumOperandElementSize;
}
}();
int const kAlignmentN = 128 / kMinimumOperandElementSize;
int M_problems[] = {kAlignmentM, 128 + 32};
int N_problems[] = {kAlignmentN, 512 - 2 * kAlignmentN};
int K_problems[] = {Gemm::ThreadblockShape::kK, Gemm::ThreadblockShape::kK * (Gemm::kStages + 1)};
double alpha_problems[] = {1.};
double beta_problems[] = {0.};
bool passed = true;
for (int M : M_problems) {
for (int N : N_problems) {
for (int K : K_problems) {
for (double alpha : alpha_problems) {
for (double beta : beta_problems) {
TestbedWithAmax<Gemm, GemmTestbed, ActivationFunctor> testbed(scaleA, scaleB, scaleC);
using ElementAccumulator = typename Gemm::ElementAccumulator;
passed = testbed.run(
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K},
1,
cutlass::from_real<ElementAccumulator>(alpha),
cutlass::from_real<ElementAccumulator>(beta)
);
EXPECT_TRUE(passed)
<< "M: " << M << ", N: " << N << ", K: " << K << ", alpha: " << alpha << ", beta: " << beta;
if (!passed) {
return passed;
}
}
}
}
}
}
return passed;
}
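// Usage sketch (illustrative only; the GEMM aliases are hypothetical). The caller selects the
// underlying testbed to match the kernel, and may supply an activation functor:
//
//   bool dense_ok  = TestAllGemmWithAbsmax<Gemm, Testbed<Gemm>>();
//   bool sparse_ok = TestAllGemmWithAbsmax<SparseGemm, SparseTestbed<SparseGemm>,
//                                          cutlass::epilogue::thread::ReLu>();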
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/test/unit/gemm/device/testbed_with_absmax.h/0
|
{
"file_path": "cutlass/test/unit/gemm/device/testbed_with_absmax.h",
"repo_id": "cutlass",
"token_count": 8329
}
| 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/epilogue/epilogue_workspace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel computes accumulator data and stores it out
template <typename Epilogue>
__global__ void kernel_epilogue_workspace(typename Epilogue::Params params) {
__shared__ typename Epilogue::SharedStorage shared_storage;
int warp_id = threadIdx.y;
int lane_id = threadIdx.x;
Epilogue epilogue(params, shared_storage, warp_id, lane_id);
//
// Initialize accumulator tile
//
typename Epilogue::FragmentC accum;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Epilogue::FragmentC::kElements; ++i) {
accum[i] = Element(warp_id * blockDim.x + lane_id);
}
//
// Efficient epilogue
//
cutlass::GemmCoord tb_tile_coord{blockIdx.x, blockIdx.y, 0};
cutlass::GemmCoord problem_size =
tb_tile_coord *
cutlass::GemmCoord{Epilogue::Shape::kM, Epilogue::Shape::kN, 1};
// Store accumulators
epilogue(
problem_size,
tb_tile_coord,
accum);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_epilogue_workspace, tensor_op_128x128_64x64) {
//
// Define an instance of the epilogue and see if it works
//
static int const kWarpCount = 4;
static int const kWarpSize = 32;
using Shape = cutlass::MatrixShape<128, 128>;
using FragmentC = cutlass::Array<int, Shape::kCount / (kWarpCount * kWarpSize)>;
using Epilogue = cutlass::gemm::threadblock::EpilogueWorkspace<
Shape,
kWarpCount,
FragmentC
>;
// Default-construct the epilogue parameters (brace-initialization avoids the most-vexing-parse).
typename Epilogue::Params params{};
// Launch the kernel
dim3 grid(1,1);
dim3 block(kWarpSize, kWarpCount);
test::gemm::threadblock::kernel_epilogue_workspace<Epilogue><<< grid, block >>>(
params
);
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "Kernel launch error - " << cudaGetErrorString(result);
//
//
//
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/test/unit/gemm/threadblock/epilogue_workspace.cu/0
|
{
"file_path": "cutlass/test/unit/gemm/threadblock/epilogue_workspace.cu",
"repo_id": "cutlass",
"token_count": 1265
}
| 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <complex>
#include "../common/cutlass_unit_test.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/reference/device/tensor_reduce.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/host_tensor.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorReduce, norm_rowmajor_f32) {
int const kM = 129;
int const kN = 91;
cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({kM, kN});
for (int m = 0; m < kM; ++m) {
for (int n = 0; n < kN; ++n) {
float x = float(((m * kN + m + 7) % 8) - 4);
tensor.at({m, n}) = x;
}
}
tensor.sync_device();
double device_norm = cutlass::reference::device::TensorNorm(tensor.device_view(), double());
double host_norm = cutlass::reference::host::TensorNorm(tensor.host_view(), double());
EXPECT_TRUE(std::abs(host_norm - device_norm) < 0.001);
}
TEST(TensorReduce, norm_nhwc_f32) {
int const kN = 19;
int const kH = 18;
int const kW = 17;
int const kC = 16;
cutlass::HostTensor<float, cutlass::layout::TensorNHWC> tensor({kN, kH, kW, kC});
int idx = 0;
double computed_norm = double();
for (int n = 0; n < kN; ++n) {
for (int h = 0; h < kH; ++h) {
for (int w = 0; w < kW; ++w) {
for (int c = 0; c < kC; ++c, ++idx) {
float x = float(((idx + 7) % 8) - 4);
computed_norm += double(x) * double(x);
tensor.at({n, h, w, c}) = x;
}
}
}
}
computed_norm = std::sqrt(computed_norm);
tensor.sync_device();
double device_norm = cutlass::reference::device::TensorNorm(tensor.device_view(), double());
double host_norm = cutlass::reference::host::TensorNorm(tensor.host_view(), double());
EXPECT_TRUE(std::abs(host_norm - device_norm) < 0.001 && std::abs(computed_norm - host_norm) < 0.001)
<< "computed norm: " << computed_norm << "\n"
<< " host norm: " << host_norm << "\n"
<< "device norm: " << device_norm << "\n";
}
TEST(TensorReduce, norm_nhwc_f16) {
int const kN = 69;
int const kH = 68;
int const kW = 67;
int const kC = 66;
cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> tensor({kN, kH, kW, kC});
int idx = 0;
double computed_norm = double();
for (int n = 0; n < kN; ++n) {
for (int h = 0; h < kH; ++h) {
for (int w = 0; w < kW; ++w) {
for (int c = 0; c < kC; ++c, ++idx) {
float x = float(((idx + 7) % 8) - 4);
computed_norm += double(x) * double(x);
tensor.at({n, h, w, c}) = cutlass::half_t(x);
}
}
}
}
computed_norm = std::sqrt(computed_norm);
tensor.sync_device();
double device_norm = cutlass::reference::device::TensorNorm(tensor.device_view(), double());
double host_norm = cutlass::reference::host::TensorNorm(tensor.host_view(), double());
EXPECT_TRUE(std::abs(host_norm - device_norm) < 0.001 && std::abs(computed_norm - host_norm) < 0.001)
<< "computed norm: " << computed_norm << "\n"
<< " host norm: " << host_norm << "\n"
<< "device norm: " << device_norm << "\n";
}
TEST(TensorReduce, norm_diff_nhwc_f32) {
int const kN = 59;
int const kH = 24;
int const kW = 57;
int const kC = 78;
using Layout = cutlass::layout::TensorNHWC;
cutlass::HostTensor<float, Layout> tensor_A({kN, kH, kW, kC});
cutlass::HostTensor<float, Layout> tensor_B({kN, kH, kW, kC});
int idx = 0;
double sum_sq_diff = 0;
for (int n = 0; n < kN; ++n) {
for (int h = 0; h < kH; ++h) {
for (int w = 0; w < kW; ++w) {
for (int c = 0; c < kC; ++c, ++idx) {
float a = float(((idx * 5 + 7) % 8) - 4);
float b = float(((idx * 3 + 7) % 8) - 4);
sum_sq_diff += double(a - b) * double(a - b);
tensor_A.at({n, h, w, c}) = a;
tensor_B.at({n, h, w, c}) = b;
}
}
}
}
tensor_A.sync_device();
tensor_B.sync_device();
double device_norm = cutlass::reference::device::TensorNormDiff(
tensor_A.device_view(), tensor_B.device_view(), double());
double host_norm = std::sqrt(sum_sq_diff);
EXPECT_TRUE(std::abs(host_norm - device_norm) < 0.001f)
<< " host norm: " << host_norm << "\n"
<< "device norm: " << device_norm;
}
TEST(TensorReduce, norm_diff_nhwc_f16) {
int const kN = 59;
int const kH = 24;
int const kW = 57;
int const kC = 78;
using Layout = cutlass::layout::TensorNHWC;
cutlass::HostTensor<cutlass::half_t, Layout> tensor_A({kN, kH, kW, kC});
cutlass::HostTensor<cutlass::half_t, Layout> tensor_B({kN, kH, kW, kC});
int idx = 0;
double sum_sq_diff = 0;
for (int n = 0; n < kN; ++n) {
for (int h = 0; h < kH; ++h) {
for (int w = 0; w < kW; ++w) {
for (int c = 0; c < kC; ++c, ++idx) {
float a = float(((idx * 5 + 7) % 8) - 4);
float b = float(((idx * 3 + 7) % 8) - 4);
sum_sq_diff += double(a - b) * double(a - b);
tensor_A.at({n, h, w, c}) = cutlass::half_t(a);
tensor_B.at({n, h, w, c}) = cutlass::half_t(b);
}
}
}
}
tensor_A.sync_device();
tensor_B.sync_device();
double device_norm = cutlass::reference::device::TensorNormDiff(
tensor_A.device_view(), tensor_B.device_view(), double());
double host_norm = std::sqrt(sum_sq_diff);
EXPECT_TRUE(std::abs(host_norm - device_norm) < 0.001f)
<< " host norm: " << host_norm << "\n"
<< "device norm: " << device_norm;
}
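// Illustrative sketch (not a test in this file): the two reductions exercised above are commonly
// combined into a relative error metric when comparing a computed tensor B against a reference A:
//
//   double ref_norm  = cutlass::reference::device::TensorNorm(tensor_A.device_view(), double());
//   double diff_norm = cutlass::reference::device::TensorNormDiff(
//     tensor_A.device_view(), tensor_B.device_view(), double());
//   double relative_error = (ref_norm > 0.0) ? diff_norm / ref_norm : diff_norm;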
////////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/test/unit/util/tensor_reduce.cu/0
|
{
"file_path": "cutlass/test/unit/util/tensor_reduce.cu",
"repo_id": "cutlass",
"token_count": 2939
}
| 62 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for all GEMM operation kinds in CUTLASS Library.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include <unordered_map>
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmOperation3xBase : public Operation {
public:
using Operator = Operator_;
using OperatorArguments = typename Operator::Arguments;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = typename Operator::ElementD;
using LayoutD = typename Operator::LayoutD;
// assuming all tensors use same type for StrideIndex
using StrideIndex = typename Operator::LayoutA::Index;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
private:
GemmDescription description_;
public:
/// Constructor
GemmOperation3xBase(char const *name = "unknown_gemm", GemmKind gemm_kind_ = GemmKind::kGemm) {
description_.name = name;
description_.provider = Provider::kCUTLASS;
description_.kind = OperationKind::kGemm;
description_.gemm_kind = gemm_kind_;
description_.tile_description.threadblock_shape = make_Coord(
Operator::ThreadblockShape::kM,
Operator::ThreadblockShape::kN,
Operator::ThreadblockShape::kK);
if constexpr (Operator::ArchTag::kMinComputeCapability >= 90) {
description_.tile_description.cluster_shape = make_Coord(
Operator::ClusterShape::kM,
Operator::ClusterShape::kN,
Operator::ClusterShape::kK);
}
description_.tile_description.threadblock_stages = Operator::kStages;
description_.tile_description.warp_count = make_Coord(
Operator::WarpCount::kM,
Operator::WarpCount::kN,
Operator::WarpCount::kK);
description_.tile_description.math_instruction.instruction_shape = make_Coord(
Operator::InstructionShape::kM,
Operator::InstructionShape::kN,
Operator::InstructionShape::kK);
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class =
OpcodeClassMap<typename Operator::OperatorClass>::kId;
description_.tile_description.math_instruction.math_operation =
MathOperationMap<typename Operator::MathOperator>::kId;
description_.tile_description.minimum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin;
description_.tile_description.maximum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax;
description_.A = make_TensorDescription<ElementA, LayoutA>(Operator::kAlignmentA);
description_.B = make_TensorDescription<ElementB, LayoutB>(Operator::kAlignmentB);
description_.C = make_TensorDescription<ElementC, LayoutC>(Operator::kAlignmentC);
description_.D = make_TensorDescription<ElementD, LayoutD>(Operator::kAlignmentD);
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.split_k_mode = SplitKMode::kNone;
description_.transform_A = ComplexTransformMap<Operator::kTransformA>::kId;
description_.transform_B = ComplexTransformMap<Operator::kTransformB>::kId;
}
/// Returns the description of the GEMM operation
virtual OperationDescription const & description() const {
return description_;
}
/// Returns the description of the GEMM operation
GemmDescription const& get_gemm_description() const {
return description_;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmUniversal3xOperation : public GemmOperation3xBase<Operator_> {
public:
using Operator = Operator_;
using OperatorArguments = typename Operator::Arguments;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = typename Operator::ElementD;
using LayoutD = typename Operator::LayoutD;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using CollectiveMainloop = typename Operator::CollectiveMainloop;
using CollectiveEpilogue = typename Operator::CollectiveEpilogue;
using ThreadEpilogueOp = typename CollectiveEpilogue::ThreadEpilogueOp;
public:
/// Constructor
GemmUniversal3xOperation(char const *name = "unknown_gemm"):
GemmOperation3xBase<Operator_>(name, GemmKind::kUniversal) {}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args, GemmUniversalConfiguration const *configuration) {
// NOTE: GemmUniversalConfiguration does not contain problem shapes or batch strides
// Do nothing here and construct kernel arguments in update_arguments_ instead
// We also cannot construct TMA descriptors without all the arguments available
operator_args.mode = configuration->mode;
return Status::kSuccess;
}
template<class FusionArgs, class = void>
struct UpdateFusionArgs {
static Status update_(FusionArgs const& fusion_args, GemmUniversalArguments const &arguments) {
// If a custom EVT is instantiated then it is the user's responsibility
// to ensure alpha and beta are updated appropriately
return Status::kSuccess;
}
};
template<class FusionArgs>
struct UpdateFusionArgs<FusionArgs, cute::void_t<decltype(FusionArgs{}.alpha)>> {
static Status update_(FusionArgs& fusion_args, GemmUniversalArguments const &arguments) {
if (arguments.pointer_mode == ScalarPointerMode::kHost) {
fusion_args.alpha = *static_cast<ElementCompute const *>(arguments.alpha);
fusion_args.beta = *static_cast<ElementCompute const *>(arguments.beta);
fusion_args.alpha_ptr = nullptr;
fusion_args.beta_ptr = nullptr;
return Status::kSuccess;
}
else if (arguments.pointer_mode == ScalarPointerMode::kDevice) {
fusion_args.alpha = 0;
fusion_args.beta = 0;
fusion_args.alpha_ptr = static_cast<ElementCompute const *>(arguments.alpha);
fusion_args.beta_ptr = static_cast<ElementCompute const *>(arguments.beta);
return Status::kSuccess;
}
else {
return Status::kErrorInvalidProblem;
}
}
};
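// Illustrative sketch (not part of this adapter): how a caller of the library interface
// typically populates the scalar fields that the specializations above consume. The variable
// names below are hypothetical.
//
//   cutlass::library::GemmUniversalArguments args;
//   float alpha = 1.0f;
//   float beta  = 0.0f;
//   args.alpha = &alpha;
//   args.beta  = &beta;
//   args.pointer_mode = cutlass::library::ScalarPointerMode::kHost;   // scalars read on the host
//
//   // Alternatively, point alpha/beta at device memory and select kDevice so the kernel
//   // dereferences them at run time:
//   //   args.pointer_mode = cutlass::library::ScalarPointerMode::kDevice;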
/// Updates the operator's arguments structure from the library's type-erased GEMM arguments
static Status update_arguments_(
OperatorArguments &operator_args,
GemmUniversalArguments const *arguments) {
Status status = Status::kSuccess;
status = UpdateFusionArgs<decltype(operator_args.epilogue.thread)>::update_(
operator_args.epilogue.thread, *arguments);
if (status != Status::kSuccess) {
return status;
}
// TODO: type erase Arguments structure in 3.0 GEMM
operator_args.problem_shape = cute::make_shape(
arguments->problem_size.m(),
arguments->problem_size.n(),
arguments->problem_size.k(),
arguments->batch_count);
// update arguments
operator_args.mainloop.ptr_A = static_cast<ElementA const *>(arguments->A);
operator_args.mainloop.ptr_B = static_cast<ElementB const *>(arguments->B);
operator_args.epilogue.ptr_C = static_cast<ElementC const *>(arguments->C);
operator_args.epilogue.ptr_D = static_cast<ElementD *>(arguments->D);
operator_args.mainloop.dA = cute::make_int_tuple_from<typename Operator::GemmKernel::StrideA>(
arguments->lda, arguments->batch_stride_A);
operator_args.mainloop.dB = cute::make_int_tuple_from<typename Operator::GemmKernel::StrideB>(
arguments->ldb, arguments->batch_stride_B);
operator_args.epilogue.dC = cute::make_int_tuple_from<typename Operator::GemmKernel::StrideC>(
arguments->ldc, arguments->batch_stride_C);
operator_args.epilogue.dD = operator_args.epilogue.dC;
/* Query device SM count to pass onto the kernel as an argument, where needed */
operator_args.hw_info.sm_count = arguments->sm_count;
if constexpr (!std::is_const_v<decltype(operator_args.scheduler.raster_order)>) {
using Enum_t = decltype(operator_args.scheduler.raster_order);
switch (arguments->raster_order) {
case RasterOrder::kAlongN:
operator_args.scheduler.raster_order = Enum_t::AlongN;
break;
case RasterOrder::kAlongM:
operator_args.scheduler.raster_order = Enum_t::AlongM;
break;
default:
operator_args.scheduler.raster_order = Enum_t::Heuristic;
}
}
return status;
}
public:
/// Returns success if the operation can proceed
Status can_implement(
void const *configuration_ptr, void const *arguments_ptr) const override {
GemmUniversalConfiguration const *configuration =
static_cast<GemmUniversalConfiguration const *>(configuration_ptr);
GemmUniversalArguments const *arguments =
static_cast<GemmUniversalArguments const *>(arguments_ptr);
OperatorArguments args;
auto status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
// can_implement rules may need access to problem shape
args.problem_shape = cute::make_shape(
configuration->problem_size.m(),
configuration->problem_size.n(),
configuration->problem_size.k(),
configuration->batch_count);
return Operator::can_implement(args);
}
/// Gets the host-side workspace
uint64_t get_host_workspace_size(void const *configuration) const override {
return sizeof(Operator);
}
/// Gets the device-side workspace
uint64_t get_device_workspace_size(
void const *configuration_ptr, void const *arguments_ptr) const override {
OperatorArguments args;
auto status = update_arguments_(
args, static_cast<GemmUniversalArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const override {
Operator *op = new (host_workspace) Operator;
return Status::kSuccess;
}
/// Runs the kernel
Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const override {
OperatorArguments args;
Status status = update_arguments_(args, static_cast<GemmUniversalArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
// Pass the full arguments so the operator re-initializes internally; TMA descriptors must be rebuilt for every new set of args
status = op->run(args, device_workspace, stream);
return status;
}
};
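// Illustrative usage sketch (not part of this header; assumptions noted inline): driving a
// type-erased 3.x GEMM through the generic Operation interface implemented above. Error
// handling is abbreviated, and `device_workspace` is assumed to hold at least
// get_device_workspace_size() bytes.
//
//   inline cutlass::Status profile_once(
//     cutlass::library::Operation const *op,
//     cutlass::library::GemmUniversalConfiguration const &config,
//     cutlass::library::GemmUniversalArguments const &args,
//     void *device_workspace,
//     cudaStream_t stream) {
//
//     cutlass::Status status = op->can_implement(&config, &args);
//     if (status != cutlass::Status::kSuccess) {
//       return status;
//     }
//     std::vector<uint8_t> host_workspace(op->get_host_workspace_size(&config));
//     status = op->initialize(&config, host_workspace.data(), device_workspace, stream);
//     if (status != cutlass::Status::kSuccess) {
//       return status;
//     }
//     return op->run(&args, host_workspace.data(), device_workspace, stream);
//   }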
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::library
///////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/tools/library/src/gemm_operation_3x.hpp/0
|
{
"file_path": "cutlass/tools/library/src/gemm_operation_3x.hpp",
"repo_id": "cutlass",
"token_count": 4381
}
| 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iosfwd>
#include <complex>
#include <cstdint>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/complex.h"
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/library/library.h"
#include "cutlass/library/util.h"
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
Provider enumerant;
}
Provider_enumerants[] = {
{"none", "None", Provider::kNone},
{"cutlass", "CUTLASS", Provider::kCUTLASS},
{"host", "reference_host", Provider::kReferenceHost},
{"device", "reference_device", Provider::kReferenceDevice},
{"cublas", "cuBLAS", Provider::kCUBLAS},
{"cudnn", "cuDNN", Provider::kCUDNN},
};
/// Converts a Provider enumerant to a string
char const *to_string(Provider provider, bool pretty) {
for (auto const & possible : Provider_enumerants) {
if (provider == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses a Provider enumerant from a string
template <>
Provider from_string<Provider>(std::string const &str) {
for (auto const & possible : Provider_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return Provider::kInvalid;
}
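// Example (illustrative): the table above gives a simple round trip between enumerants and
// their command-line spellings. The local names below are hypothetical.
//
//   Provider p = from_string<Provider>("cublas");          // Provider::kCUBLAS
//   char const *pretty_name = to_string(p, true);          // "cuBLAS"
//   Provider q = from_string<Provider>("not-a-provider");  // Provider::kInvalid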
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
GemmKind enumerant;
}
GemmKind_enumerants[] = {
{"gemm", "<Gemm>", GemmKind::kGemm},
{"spgemm", "<Sparse>", GemmKind::kSparse},
{"universal", "<Universal>", GemmKind::kUniversal},
{"planar_complex", "<PlanarComplex>", GemmKind::kPlanarComplex},
{"planar_complex_array", "<PlanarComplexArray>", GemmKind::kPlanarComplexArray},
{"grouped", "<Grouped>", GemmKind::kGrouped},
};
/// Converts a GemmKind enumerant to a string
char const *to_string(GemmKind type, bool pretty) {
for (auto const & possible : GemmKind_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
RankKKind enumerant;
}
RankKKind_enumerants[] = {
{"universal", "<Universal>", RankKKind::kUniversal},
};
/// Converts a RankKKind enumerant to a string
char const *to_string(RankKKind type, bool pretty) {
for (auto const & possible : RankKKind_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
TrmmKind enumerant;
}
TrmmKind_enumerants[] = {
{"universal", "<Universal>", TrmmKind::kUniversal},
};
/// Converts a TrmmKind enumerant to a string
char const *to_string(TrmmKind type, bool pretty) {
for (auto const & possible : TrmmKind_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
SymmKind enumerant;
}
SymmKind_enumerants[] = {
{"universal", "<Universal>", SymmKind::kUniversal},
};
/// Converts a SymmKind enumerant to a string
char const *to_string(SymmKind type, bool pretty) {
for (auto const & possible : SymmKind_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
SideMode enumerant;
}
SideMode_enumerants[] = {
{"left", "Left", SideMode::kLeft},
{"right", "Right", SideMode::kRight}
};
/// Converts a SideMode enumerant to a string
char const *to_string(SideMode type, bool pretty) {
for (auto const & possible : SideMode_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
FillMode enumerant;
}
FillMode_enumerants[] = {
{"lower", "Lower", FillMode::kLower},
{"upper", "Upper", FillMode::kUpper}
};
/// Converts a FillMode enumerant to a string
char const *to_string(FillMode type, bool pretty) {
for (auto const & possible : FillMode_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
BlasMode enumerant;
}
BlasMode_enumerants[] = {
{"symmetric", "Symmetric", BlasMode::kSymmetric},
{"hermitian", "Hermitian", BlasMode::kHermitian}
};
/// Converts a BlasMode enumerant to a string
char const *to_string(BlasMode type, bool pretty) {
for (auto const & possible : BlasMode_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
DiagType enumerant;
}
DiagType_enumerants[] = {
{"nonunit", "NonUnit", DiagType::kNonUnit},
{"unit", "Unit", DiagType::kUnit}
};
/// Converts a DiagType enumerant to a string
char const *to_string(DiagType type, bool pretty) {
for (auto const & possible : DiagType_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
OperationKind enumerant;
}
OperationKind_enumerants[] = {
{"eq_gemm", "EqGemm", OperationKind::kEqGemm},
{"gemm", "Gemm", OperationKind::kGemm},
{"rank_k", "RankK", OperationKind::kRankK},
{"rank_2k", "Rank2K", OperationKind::kRank2K},
{"trmm", "Trmm", OperationKind::kTrmm},
{"symm", "Symm", OperationKind::kSymm},
{"conv2d", "Conv2d", OperationKind::kConv2d},
{"conv3d", "Conv3d", OperationKind::kConv3d},
{"spgemm", "SparseGemm", OperationKind::kSparseGemm},
};
/// Converts an OperationKind enumerant to a string
char const *to_string(OperationKind enumerant, bool pretty) {
for (auto const & possible : OperationKind_enumerants) {
if (enumerant == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Converts an OperationKind enumerant from a string
template <>
OperationKind from_string<OperationKind>(std::string const &str) {
for (auto const & possible : OperationKind_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return OperationKind::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
Status enumerant;
}
Status_enumerants[] = {
{"success", "Success", Status::kSuccess},
{"misaligned_operand", "Error: misaligned operand", Status::kErrorMisalignedOperand},
{"invalid_problem", "Error: invalid problem", Status::kErrorInvalidProblem},
{"not_supported", "Error: not supported", Status::kErrorNotSupported},
{"internal", "Error: internal", Status::kErrorInternal}
};
/// Converts a Status enumerant to a string
char const *to_string(Status status, bool pretty) {
for (auto const & possible : Status_enumerants) {
if (status == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Converts a Status enumerant from a string
template <>
Status from_string<Status>(std::string const &str) {
for (auto const & possible : Status_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return Status::kInvalid;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
NumericTypeID enumerant;
}
NumericTypeID_enumerants[] = {
{"unknown", "<unknown>", NumericTypeID::kUnknown},
{"void", "Void", NumericTypeID::kVoid},
{"b1", "B1", NumericTypeID::kB1},
{"u2", "U2", NumericTypeID::kU2},
{"u4", "U4", NumericTypeID::kU4},
{"u8", "U8", NumericTypeID::kU8},
{"u16", "U16", NumericTypeID::kU16},
{"u32", "U32", NumericTypeID::kU32},
{"u64", "U64", NumericTypeID::kU64},
{"s2", "S2", NumericTypeID::kS2},
{"s4", "S4", NumericTypeID::kS4},
{"s8", "S8", NumericTypeID::kS8},
{"s16", "S16", NumericTypeID::kS16},
{"s32", "S32", NumericTypeID::kS32},
{"s64", "S64", NumericTypeID::kS64},
{"fe4m3", "FE4M3", NumericTypeID::kFE4M3},
{"fe5m2", "FE5M2", NumericTypeID::kFE5M2},
{"f16", "F16", NumericTypeID::kF16},
{"bf16", "BF16", NumericTypeID::kBF16},
{"f32", "F32", NumericTypeID::kF32},
{"tf32", "TF32", NumericTypeID::kTF32},
{"f64", "F64", NumericTypeID::kF64},
{"cf16", "CF16", NumericTypeID::kCF16},
{"cbf16", "CBF16", NumericTypeID::kCBF16},
{"cf32", "CF32", NumericTypeID::kCF32},
{"ctf32", "CTF32", NumericTypeID::kCTF32},
{"cf64", "CF64", NumericTypeID::kCF64},
{"cu2", "CU2", NumericTypeID::kCU2},
{"cu4", "CU4", NumericTypeID::kCU4},
{"cu8", "CU8", NumericTypeID::kCU8},
{"cu16", "CU16", NumericTypeID::kCU16},
{"cu32", "CU32", NumericTypeID::kCU32},
{"cu64", "CU64", NumericTypeID::kCU64},
{"cs2", "CS2", NumericTypeID::kCS2},
{"cs4", "CS4", NumericTypeID::kCS4},
{"cs8", "CS8", NumericTypeID::kCS8},
{"cs16", "CS16", NumericTypeID::kCS16},
{"cs32", "CS32", NumericTypeID::kCS32},
{"cs64", "CS64", NumericTypeID::kCS64},
{"*", "<unknown/enumerate all>", NumericTypeID::kUnknown}
};
/// Converts a NumericTypeID enumerant to a string
char const *to_string(NumericTypeID type, bool pretty) {
for (auto const & possible : NumericTypeID_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses a NumericTypeID enumerant from a string
template <>
NumericTypeID from_string<NumericTypeID>(std::string const &str) {
for (auto const & possible : NumericTypeID_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return NumericTypeID::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the size of a data type in bits
int sizeof_bits(NumericTypeID type) {
switch (type) {
case NumericTypeID::kFE4M3: return 8;
case NumericTypeID::kFE5M2: return 8;
case NumericTypeID::kF16: return 16;
case NumericTypeID::kBF16: return 16;
case NumericTypeID::kTF32: return 32;
case NumericTypeID::kF32: return 32;
case NumericTypeID::kF64: return 64;
case NumericTypeID::kCF16: return 32;
case NumericTypeID::kCBF16: return 32;
case NumericTypeID::kCF32: return 64;
case NumericTypeID::kCTF32: return 64;
case NumericTypeID::kCF64: return 128;
case NumericTypeID::kS2: return 2;
case NumericTypeID::kS4: return 4;
case NumericTypeID::kS8: return 8;
case NumericTypeID::kS16: return 16;
case NumericTypeID::kS32: return 32;
case NumericTypeID::kS64: return 64;
case NumericTypeID::kU2: return 2;
case NumericTypeID::kU4: return 4;
case NumericTypeID::kU8: return 8;
case NumericTypeID::kU16: return 16;
case NumericTypeID::kU32: return 32;
case NumericTypeID::kU64: return 64;
case NumericTypeID::kB1: return 1;
default: break;
}
return 0;
}
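// Example (illustrative): sizeof_bits() is expressed in bits so that sub-byte types remain
// representable. A caller computing the packed storage footprint of a tensor would round up
// to whole bytes, e.g.
//
//   int64_t tensor_bytes(NumericTypeID type, int64_t num_elements) {
//     return (num_elements * sizeof_bits(type) + 7) / 8;   // 1000 x kS4 -> 500 bytes
//   }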
/// Returns true if the numeric type is a complex data type or false if real-valued.
bool is_complex_type(NumericTypeID type) {
switch (type) {
case NumericTypeID::kCF16: return true;
case NumericTypeID::kCF32: return true;
case NumericTypeID::kCF64: return true;
case NumericTypeID::kCBF16: return true;
case NumericTypeID::kCTF32: return true;
default: break;
}
return false;
}
/// Returns the field underlying a complex valued type
NumericTypeID get_real_type(NumericTypeID type) {
switch (type) {
case NumericTypeID::kCF16: return NumericTypeID::kF16;
case NumericTypeID::kCF32: return NumericTypeID::kF32;
case NumericTypeID::kCF64: return NumericTypeID::kF64;
case NumericTypeID::kCBF16: return NumericTypeID::kBF16;
case NumericTypeID::kCTF32: return NumericTypeID::kTF32;
default: break;
}
return type;
}
/// Returns true if numeric type is integer
bool is_integer_type(NumericTypeID type) {
switch (type) {
case NumericTypeID::kS2: return true;
case NumericTypeID::kS4: return true;
case NumericTypeID::kS8: return true;
case NumericTypeID::kS16: return true;
case NumericTypeID::kS32: return true;
case NumericTypeID::kS64: return true;
case NumericTypeID::kU2: return true;
case NumericTypeID::kU4: return true;
case NumericTypeID::kU8: return true;
case NumericTypeID::kU16: return true;
case NumericTypeID::kU32: return true;
case NumericTypeID::kU64: return true;
default: break;
}
return false;
}
/// Returns true if numeric type is signed
bool is_signed_type(NumericTypeID type) {
switch (type) {
case NumericTypeID::kFE4M3: return true;
case NumericTypeID::kFE5M2: return true;
case NumericTypeID::kF16: return true;
case NumericTypeID::kBF16: return true;
case NumericTypeID::kTF32: return true;
case NumericTypeID::kF32: return true;
case NumericTypeID::kF64: return true;
case NumericTypeID::kS2: return true;
case NumericTypeID::kS4: return true;
case NumericTypeID::kS8: return true;
case NumericTypeID::kS16: return true;
case NumericTypeID::kS32: return true;
case NumericTypeID::kS64: return true;
default: break;
}
return false;
}
/// Returns true if numeric type is a signed integer
bool is_signed_integer(NumericTypeID type) {
return is_integer_type(type) && is_signed_type(type);
}
/// returns true if numeric type is an unsigned integer
bool is_unsigned_integer(NumericTypeID type) {
return is_integer_type(type) && !is_signed_type(type);
}
/// Returns true if numeric type is floating-point type
bool is_float_type(NumericTypeID type) {
switch (type) {
case NumericTypeID::kFE4M3: return true;
case NumericTypeID::kFE5M2: return true;
case NumericTypeID::kF16: return true;
case NumericTypeID::kBF16: return true;
case NumericTypeID::kTF32: return true;
case NumericTypeID::kF32: return true;
case NumericTypeID::kF64: return true;
case NumericTypeID::kCF16: return true;
case NumericTypeID::kCBF16: return true;
case NumericTypeID::kCTF32: return true;
case NumericTypeID::kCF32: return true;
case NumericTypeID::kCF64: return true;
default: break;
}
return false;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
LayoutTypeID layout;
char const *alias;
}
layout_aliases[] = {
{LayoutTypeID::kUnknown, "unknown"},
{LayoutTypeID::kRowMajor, "row"},
{LayoutTypeID::kRowMajor, "t"},
{LayoutTypeID::kColumnMajor, "column"},
{LayoutTypeID::kColumnMajor, "col"},
{LayoutTypeID::kColumnMajor, "n"},
{LayoutTypeID::kColumnMajorInterleavedK2, "nk2"},
{LayoutTypeID::kRowMajorInterleavedK2, "tk2"},
{LayoutTypeID::kColumnMajorInterleavedK4, "nk4"},
{LayoutTypeID::kRowMajorInterleavedK4, "tk4"},
{LayoutTypeID::kColumnMajorInterleavedK16, "nk16"},
{LayoutTypeID::kRowMajorInterleavedK16, "tk16"},
{LayoutTypeID::kColumnMajorInterleavedK32, "nk32"},
{LayoutTypeID::kRowMajorInterleavedK32, "tk32"},
{LayoutTypeID::kColumnMajorInterleavedK64, "nk64"},
{LayoutTypeID::kRowMajorInterleavedK64, "tk64"},
{LayoutTypeID::kTensorNCHW, "nchw"},
{LayoutTypeID::kTensorNCDHW, "ncdhw"},
{LayoutTypeID::kTensorNHWC, "nhwc"},
{LayoutTypeID::kTensorNDHWC, "ndhwc"},
{LayoutTypeID::kTensorNC32HW32, "nc32hw32"},
{LayoutTypeID::kTensorNC64HW64, "nc64hw64"},
{LayoutTypeID::kTensorC32RSK32, "c32rsk32"},
{LayoutTypeID::kTensorC64RSK64, "c64rsk64"},
{LayoutTypeID::kUnknown, "*"},
{LayoutTypeID::kInvalid, nullptr}
};
/// Converts a LayoutTypeID enumerant to a string
char const *to_string(LayoutTypeID layout, bool pretty) {
for (auto const & alias : layout_aliases) {
if (alias.layout == layout) {
return alias.alias;
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses a LayoutTypeID enumerant from a string
template <>
LayoutTypeID from_string<LayoutTypeID>(std::string const &str) {
for (auto const & alias : layout_aliases) {
if (str.compare(alias.alias) == 0) {
return alias.layout;
}
}
return LayoutTypeID::kInvalid;
}
/// Gets stride rank for the layout_id (static function)
int get_layout_stride_rank(LayoutTypeID layout_id) {
switch (layout_id) {
case LayoutTypeID::kColumnMajor:
return cutlass::layout::ColumnMajor::kStrideRank;
case LayoutTypeID::kRowMajor:
return cutlass::layout::RowMajor::kStrideRank;
case LayoutTypeID::kColumnMajorInterleavedK2:
return cutlass::layout::ColumnMajorInterleaved<2>::kStrideRank;
case LayoutTypeID::kRowMajorInterleavedK2:
return cutlass::layout::RowMajorInterleaved<2>::kStrideRank;
case LayoutTypeID::kColumnMajorInterleavedK4:
return cutlass::layout::ColumnMajorInterleaved<4>::kStrideRank;
case LayoutTypeID::kRowMajorInterleavedK4:
return cutlass::layout::RowMajorInterleaved<4>::kStrideRank;
case LayoutTypeID::kColumnMajorInterleavedK16:
return cutlass::layout::ColumnMajorInterleaved<16>::kStrideRank;
case LayoutTypeID::kRowMajorInterleavedK16:
return cutlass::layout::RowMajorInterleaved<16>::kStrideRank;
case LayoutTypeID::kColumnMajorInterleavedK32:
return cutlass::layout::ColumnMajorInterleaved<32>::kStrideRank;
case LayoutTypeID::kRowMajorInterleavedK32:
return cutlass::layout::RowMajorInterleaved<32>::kStrideRank;
case LayoutTypeID::kColumnMajorInterleavedK64:
return cutlass::layout::ColumnMajorInterleaved<64>::kStrideRank;
case LayoutTypeID::kRowMajorInterleavedK64:
return cutlass::layout::RowMajorInterleaved<64>::kStrideRank;
case LayoutTypeID::kTensorNCHW:
return cutlass::layout::TensorNCHW::kStrideRank;
case LayoutTypeID::kTensorNHWC:
return cutlass::layout::TensorNHWC::kStrideRank;
case LayoutTypeID::kTensorNDHWC:
return cutlass::layout::TensorNDHWC::kStrideRank;
case LayoutTypeID::kTensorNC32HW32:
return cutlass::layout::TensorNCxHWx<32>::kStrideRank;
case LayoutTypeID::kTensorNC64HW64:
return cutlass::layout::TensorNCxHWx<64>::kStrideRank;
case LayoutTypeID::kTensorC32RSK32:
return cutlass::layout::TensorCxRSKx<32>::kStrideRank;
case LayoutTypeID::kTensorC64RSK64:
return cutlass::layout::TensorCxRSKx<64>::kStrideRank;
default:
throw std::runtime_error("Unsupported LayoutTypeID in LayoutType::get_stride_rank");
}
}
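// Example (illustrative): the stride rank is the number of independent stride values a layout
// stores, e.g. 1 for kRowMajor and kColumnMajor (a single leading dimension) and 3 for
// kTensorNHWC (strides for the W, H and N dimensions, with C contiguous).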
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
OpcodeClassID enumerant;
}
OpcodeClassID_enumerants[] = {
{"simt", "<simt>", OpcodeClassID::kSimt},
{"tensorop", "<tensorop>", OpcodeClassID::kTensorOp},
{"wmmatensorop", "<wmmatensorop>", OpcodeClassID::kWmmaTensorOp},
{"wmma", "<wmma>", OpcodeClassID::kWmmaTensorOp},
};
/// Converts an OpcodeClassID enumerant to a string
char const *to_string(OpcodeClassID type, bool pretty) {
for (auto const & possible : OpcodeClassID_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Converts an OpcodeClassID enumerant from a string
template <>
OpcodeClassID from_string<OpcodeClassID>(std::string const &str) {
for (auto const & possible : OpcodeClassID_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return OpcodeClassID::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
ComplexTransform enumerant;
}
ComplexTransform_enumerants[] = {
{"n", "none", ComplexTransform::kNone},
{"c", "conj", ComplexTransform::kConjugate}
};
/// Converts a ComplexTransform enumerant to a string
char const *to_string(ComplexTransform type, bool pretty) {
for (auto const & possible : ComplexTransform_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Converts a ComplexTransform enumerant from a string
template <>
ComplexTransform from_string<ComplexTransform>(std::string const &str) {
for (auto const & possible : ComplexTransform_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return ComplexTransform::kInvalid;
}
static struct {
char const *text;
char const *pretty;
SplitKMode enumerant;
}
SplitKMode_enumerants[] = {
{"serial", "<serial>", SplitKMode::kSerial},
{"parallel", "<parallel>", SplitKMode::kParallel},
};
/// Converts a SplitKMode enumerant to a string
char const *to_string(SplitKMode type, bool pretty) {
for (auto const & possible : SplitKMode_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Converts a SplitKMode enumerant from a string
template <>
SplitKMode from_string<SplitKMode>(std::string const &str) {
for (auto const & possible : SplitKMode_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return SplitKMode::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
ConvModeID enumerant;
}
ConvModeID_enumerants[] = {
{"cross", "<cross>", ConvModeID::kCrossCorrelation},
{"conv", "<conv>", ConvModeID::kConvolution},
};
/// Converts a ConvModeID enumerant to a string
char const *to_string(ConvModeID type, bool pretty) {
for (auto const & possible : ConvModeID_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Converts a ConvModeID enumerant from a string
template <>
ConvModeID from_string<ConvModeID>(std::string const &str) {
for (auto const & possible : ConvModeID_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return ConvModeID::kInvalid;
}
static struct {
char const *text;
char const *pretty;
IteratorAlgorithmID enumerant;
}
IteratorAlgorithmID_enumerants[] = {
{"none", "<none>", IteratorAlgorithmID::kNone},
{"analytic", "<analytic>", IteratorAlgorithmID::kAnalytic},
{"optimized", "<optimized>", IteratorAlgorithmID::kOptimized},
{"fixed_channels", "<fixed_channels>", IteratorAlgorithmID::kFixedChannels},
{"few_channels", "<few_channels>", IteratorAlgorithmID::kFewChannels},
};
/// Converts an IteratorAlgorithmID enumerant to a string
char const *to_string(IteratorAlgorithmID type, bool pretty) {
for (auto const & possible : IteratorAlgorithmID_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Converts an IteratorAlgorithmID enumerant from a string
template <>
IteratorAlgorithmID from_string<IteratorAlgorithmID>(std::string const &str) {
for (auto const & possible : IteratorAlgorithmID_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return IteratorAlgorithmID::kInvalid;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
ConvKind enumerant;
}
ConvKind_enumerants[] = {
{"unknown", "<unknown>", ConvKind::kUnknown},
{"fprop", "<fprop>", ConvKind::kFprop},
{"dgrad", "<dgrad>", ConvKind::kDgrad},
{"wgrad", "<wgrad>", ConvKind::kWgrad},
};
/// Converts a ConvKind enumerant to a string
char const *to_string(ConvKind type, bool pretty) {
for (auto const & possible : ConvKind_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Converts a ConvKind enumerant from a string
template <>
ConvKind from_string<ConvKind>(std::string const &str) {
for (auto const & possible : ConvKind_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return ConvKind::kInvalid;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
RasterOrder enumerant;
}
RasterOrder_enumerants[] = {
{"along_n", "<along_n>", RasterOrder::kAlongN},
{"along_m", "<along_m>", RasterOrder::kAlongM},
{"heuristic", "<heuristic>", RasterOrder::kHeuristic},
};
/// Converts a RasterOrder enumerant to a string
char const *to_string(RasterOrder type, bool pretty) {
for (auto const & possible : RasterOrder_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Converts a RasterOrder enumerant from a string
template <>
RasterOrder from_string<RasterOrder>(std::string const &str) {
for (auto const & possible : RasterOrder_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return RasterOrder::kInvalid;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Lexical cast a string to a byte array. Returns true if cast is successful or false if invalid.
bool lexical_cast(std::vector<uint8_t> &bytes, NumericTypeID type, std::string const &str) {
int size_bytes = sizeof_bits(type) / 8;
if (!size_bytes) {
return false;
}
bytes.resize(size_bytes, 0);
std::stringstream ss;
ss << str;
switch (type) {
case NumericTypeID::kU8:
{
ss >> *reinterpret_cast<uint8_t *>(bytes.data());
}
break;
case NumericTypeID::kU16:
{
ss >> *reinterpret_cast<uint16_t *>(bytes.data());
}
break;
case NumericTypeID::kU32:
{
ss >> *reinterpret_cast<uint32_t *>(bytes.data());
}
break;
case NumericTypeID::kU64:
{
ss >> *reinterpret_cast<uint64_t *>(bytes.data());
}
break;
case NumericTypeID::kS8:
{
ss >> *reinterpret_cast<int8_t *>(bytes.data());
}
break;
case NumericTypeID::kS16:
{
ss >> *reinterpret_cast<int16_t *>(bytes.data());
}
break;
case NumericTypeID::kS32:
{
ss >> *reinterpret_cast<int32_t *>(bytes.data());
}
break;
case NumericTypeID::kS64:
{
ss >> *reinterpret_cast<int64_t *>(bytes.data());
}
break;
case NumericTypeID::kFE4M3:
{
float tmp;
ss >> tmp;
*reinterpret_cast<float_e4m3_t *>(bytes.data()) = static_cast<float_e4m3_t>(tmp);
}
break;
case NumericTypeID::kFE5M2:
{
float tmp;
ss >> tmp;
*reinterpret_cast<float_e5m2_t *>(bytes.data()) = static_cast<float_e5m2_t>(tmp);
}
break;
case NumericTypeID::kF16:
{
float tmp;
ss >> tmp;
*reinterpret_cast<half_t *>(bytes.data()) = static_cast<half_t>(tmp);
}
break;
case NumericTypeID::kBF16:
{
float tmp;
ss >> tmp;
*reinterpret_cast<bfloat16_t *>(bytes.data()) = static_cast<bfloat16_t>(tmp);
}
break;
case NumericTypeID::kTF32:
{
float tmp;
ss >> tmp;
*reinterpret_cast<tfloat32_t *>(bytes.data()) = static_cast<tfloat32_t>(tmp);
}
break;
case NumericTypeID::kF32:
{
ss >> *reinterpret_cast<float *>(bytes.data());
}
break;
case NumericTypeID::kF64:
{
ss >> *reinterpret_cast<double *>(bytes.data());
}
break;
case NumericTypeID::kCF16:
{
std::complex<float> tmp;
ss >> tmp;
cutlass::complex<cutlass::half_t> *x = reinterpret_cast<cutlass::complex<half_t> *>(bytes.data());
x->real() = static_cast<half_t>(std::real(tmp));
x->imag() = static_cast<half_t>(std::imag(tmp));
}
break;
case NumericTypeID::kCBF16:
{
std::complex<float> tmp;
ss >> tmp;
cutlass::complex<cutlass::bfloat16_t> *x = reinterpret_cast<cutlass::complex<bfloat16_t> *>(bytes.data());
x->real() = static_cast<bfloat16_t>(std::real(tmp));
x->imag() = static_cast<bfloat16_t>(std::imag(tmp));
}
break;
case NumericTypeID::kCF32:
{
ss >> *reinterpret_cast<std::complex<float>*>(bytes.data());
}
break;
case NumericTypeID::kCTF32:
{
std::complex<float> tmp;
ss >> tmp;
cutlass::complex<cutlass::tfloat32_t> *x = reinterpret_cast<cutlass::complex<tfloat32_t> *>(bytes.data());
x->real() = static_cast<tfloat32_t>(std::real(tmp));
x->imag() = static_cast<tfloat32_t>(std::imag(tmp));
}
break;
case NumericTypeID::kCF64:
{
ss >> *reinterpret_cast<std::complex<double>*>(bytes.data());
}
break;
default:
return false;
}
return true;
}
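// Example (illustrative): parsing a scalar supplied on the command line into a typed byte
// array, then printing it back with the byte-array-to-string overload defined below.
//
//   std::vector<uint8_t> scalar;
//   if (lexical_cast(scalar, NumericTypeID::kF16, "1.5")) {
//     std::string text = lexical_cast(scalar, NumericTypeID::kF16);   // "1.5"
//   }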
///////////////////////////////////////////////////////////////////////////////////////////////////
std::string lexical_cast(int64_t int_value) {
std::stringstream ss;
ss << int_value;
return ss.str();
}
/// Lexical cast TO a string FROM a byte array. Returns "<invalid>" if the type or byte count is not supported.
std::string lexical_cast(std::vector<uint8_t> &bytes, NumericTypeID type) {
size_t size_bytes = sizeof_bits(type) / 8;
if (!size_bytes || size_bytes != bytes.size()) {
return "<invalid>";
}
bytes.resize(size_bytes, 0);
std::stringstream ss;
switch (type) {
case NumericTypeID::kU8:
{
ss << *reinterpret_cast<uint8_t *>(bytes.data());
}
break;
case NumericTypeID::kU16:
{
ss << *reinterpret_cast<uint16_t *>(bytes.data());
}
break;
case NumericTypeID::kU32:
{
ss << *reinterpret_cast<uint32_t *>(bytes.data());
}
break;
case NumericTypeID::kU64:
{
ss << *reinterpret_cast<uint64_t *>(bytes.data());
}
break;
case NumericTypeID::kS8:
{
ss << *reinterpret_cast<int8_t *>(bytes.data());
}
break;
case NumericTypeID::kS16:
{
ss << *reinterpret_cast<int16_t *>(bytes.data());
}
break;
case NumericTypeID::kS32:
{
ss << *reinterpret_cast<int32_t *>(bytes.data());
}
break;
case NumericTypeID::kS64:
{
ss << *reinterpret_cast<int64_t *>(bytes.data());
}
break;
case NumericTypeID::kFE4M3:
{
float tmp = *reinterpret_cast<float_e4m3_t *>(bytes.data());
ss << tmp;
}
break;
case NumericTypeID::kFE5M2:
{
float tmp = *reinterpret_cast<float_e5m2_t *>(bytes.data());
ss << tmp;
}
break;
case NumericTypeID::kF16:
{
float tmp = *reinterpret_cast<half_t *>(bytes.data());
ss << tmp;
}
break;
case NumericTypeID::kBF16:
{
float tmp = *reinterpret_cast<bfloat16_t *>(bytes.data());
ss << tmp;
}
break;
case NumericTypeID::kTF32:
{
float tmp = *reinterpret_cast<tfloat32_t *>(bytes.data());
ss << tmp;
}
break;
case NumericTypeID::kF32:
{
ss << *reinterpret_cast<float *>(bytes.data());
}
break;
case NumericTypeID::kF64:
{
ss << *reinterpret_cast<double *>(bytes.data());
}
break;
case NumericTypeID::kCF16:
{
cutlass::complex<half_t> const *x =
reinterpret_cast<cutlass::complex<half_t> const *>(bytes.data());
ss << float(x->real());
if (x->imag() != cutlass::half_t()) {
ss << "+i" << float(x->imag());
}
}
break;
case NumericTypeID::kCBF16:
{
cutlass::complex<bfloat16_t> const *x =
reinterpret_cast<cutlass::complex<bfloat16_t> const *>(bytes.data());
ss << float(x->real());
if (x->imag() != cutlass::bfloat16_t()) {
ss << "+i" << float(x->imag());
}
}
break;
case NumericTypeID::kCF32:
{
cutlass::complex<float> const * x = reinterpret_cast<cutlass::complex<float> const *>(bytes.data());
ss << x->real();
if (x->imag() != float()) {
ss << "+i" << x->imag();
}
}
break;
case NumericTypeID::kCTF32:
{
cutlass::complex<tfloat32_t> const * x = reinterpret_cast<cutlass::complex<tfloat32_t> const *>(bytes.data());
ss << float(x->real());
if (x->imag() != tfloat32_t()) {
ss << "+i" << float(x->imag());
}
}
break;
case NumericTypeID::kCF64:
{
cutlass::complex<double> const * x = reinterpret_cast<cutlass::complex<double> const *>(bytes.data());
ss << x->real();
if (x->imag() != double()) {
ss << "+i" << x->imag();
}
}
break;
default:
return "<unknown>";
}
return ss.str();
}
/// Casts from a signed int64 to the destination type. Returns true if successful.
bool cast_from_int64(std::vector<uint8_t> &bytes, NumericTypeID type, int64_t src) {
int size_bytes = sizeof_bits(type) / 8;
if (!size_bytes) {
return false;
}
bytes.resize(size_bytes, 0);
switch (type) {
case NumericTypeID::kU8:
{
*reinterpret_cast<uint8_t *>(bytes.data()) = static_cast<uint8_t>(src);
}
break;
case NumericTypeID::kU16:
{
*reinterpret_cast<uint16_t *>(bytes.data()) = static_cast<uint16_t>(src);
}
break;
case NumericTypeID::kU32:
{
*reinterpret_cast<uint32_t *>(bytes.data()) = static_cast<uint32_t>(src);
}
break;
case NumericTypeID::kU64:
{
*reinterpret_cast<uint64_t *>(bytes.data()) = static_cast<uint64_t>(src);
}
break;
case NumericTypeID::kS8:
{
*reinterpret_cast<int8_t *>(bytes.data()) = static_cast<int8_t>(src);
}
break;
case NumericTypeID::kS16:
{
*reinterpret_cast<int16_t *>(bytes.data()) = static_cast<int16_t>(src);
}
break;
case NumericTypeID::kS32:
{
*reinterpret_cast<int32_t *>(bytes.data()) = static_cast<int32_t>(src);
}
break;
case NumericTypeID::kS64:
{
*reinterpret_cast<int64_t *>(bytes.data()) = static_cast<int64_t>(src);
}
break;
case NumericTypeID::kFE4M3:
{
*reinterpret_cast<float_e4m3_t *>(bytes.data()) = static_cast<float_e4m3_t>(float(src));
}
break;
case NumericTypeID::kFE5M2:
{
*reinterpret_cast<float_e5m2_t *>(bytes.data()) = static_cast<float_e5m2_t>(float(src));
}
break;
case NumericTypeID::kF16:
{
*reinterpret_cast<half_t *>(bytes.data()) = static_cast<half_t>(float(src));
}
break;
case NumericTypeID::kBF16:
{
*reinterpret_cast<bfloat16_t *>(bytes.data()) = static_cast<bfloat16_t>(float(src));
}
break;
case NumericTypeID::kTF32:
{
*reinterpret_cast<tfloat32_t *>(bytes.data()) = static_cast<tfloat32_t>(float(src));
}
break;
case NumericTypeID::kF32:
{
*reinterpret_cast<float *>(bytes.data()) = static_cast<float>(src);
}
break;
case NumericTypeID::kF64:
{
*reinterpret_cast<double *>(bytes.data()) = double(src);
}
break;
case NumericTypeID::kCF16:
{
cutlass::complex<cutlass::half_t> *x = reinterpret_cast<cutlass::complex<half_t> *>(bytes.data());
x->real() = static_cast<half_t>(float(src));
x->imag() = static_cast<half_t>(float(0));
}
break;
case NumericTypeID::kCF32:
{
*reinterpret_cast<cutlass::complex<float>*>(bytes.data()) = cutlass::complex<float>(float(src), float(0));
}
break;
case NumericTypeID::kCF64:
{
*reinterpret_cast<cutlass::complex<double>*>(bytes.data()) = cutlass::complex<double>(double(src), double(0));
}
break;
default:
return false;
}
return true;
}
/// Casts from an unsigned int64 to the destination type. Returns true if successful.
bool cast_from_uint64(std::vector<uint8_t> &bytes, NumericTypeID type, uint64_t src) {
int size_bytes = sizeof_bits(type) / 8;
if (!size_bytes) {
return false;
}
bytes.resize(size_bytes, 0);
switch (type) {
case NumericTypeID::kU8:
{
*reinterpret_cast<uint8_t *>(bytes.data()) = static_cast<uint8_t>(src);
}
break;
case NumericTypeID::kU16:
{
*reinterpret_cast<uint16_t *>(bytes.data()) = static_cast<uint16_t>(src);
}
break;
case NumericTypeID::kU32:
{
*reinterpret_cast<uint32_t *>(bytes.data()) = static_cast<uint32_t>(src);
}
break;
case NumericTypeID::kU64:
{
*reinterpret_cast<uint64_t *>(bytes.data()) = static_cast<uint64_t>(src);
}
break;
case NumericTypeID::kS8:
{
*reinterpret_cast<int8_t *>(bytes.data()) = static_cast<int8_t>(src);
}
break;
case NumericTypeID::kS16:
{
*reinterpret_cast<int16_t *>(bytes.data()) = static_cast<int16_t>(src);
}
break;
case NumericTypeID::kS32:
{
*reinterpret_cast<int32_t *>(bytes.data()) = static_cast<int32_t>(src);
}
break;
case NumericTypeID::kS64:
{
*reinterpret_cast<int64_t *>(bytes.data()) = static_cast<int64_t>(src);
}
break;
case NumericTypeID::kFE4M3:
{
*reinterpret_cast<float_e4m3_t *>(bytes.data()) = static_cast<float_e4m3_t>(float(src));
}
break;
case NumericTypeID::kFE5M2:
{
*reinterpret_cast<float_e5m2_t *>(bytes.data()) = static_cast<float_e5m2_t>(float(src));
}
break;
case NumericTypeID::kF16:
{
*reinterpret_cast<half_t *>(bytes.data()) = static_cast<half_t>(float(src));
}
break;
case NumericTypeID::kBF16:
{
*reinterpret_cast<bfloat16_t *>(bytes.data()) = static_cast<bfloat16_t>(float(src));
}
break;
case NumericTypeID::kTF32:
{
*reinterpret_cast<tfloat32_t *>(bytes.data()) = static_cast<tfloat32_t>(float(src));
}
break;
case NumericTypeID::kF32:
{
*reinterpret_cast<float *>(bytes.data()) = static_cast<float>(src);
}
break;
case NumericTypeID::kF64:
{
*reinterpret_cast<double *>(bytes.data()) = double(src);
}
break;
case NumericTypeID::kCF16:
{
cutlass::complex<cutlass::half_t> *x = reinterpret_cast<cutlass::complex<half_t> *>(bytes.data());
x->real() = static_cast<half_t>(float(src));
x->imag() = static_cast<half_t>(float(0));
}
break;
case NumericTypeID::kCF32:
{
*reinterpret_cast<std::complex<float>*>(bytes.data()) = std::complex<float>(float(src), float(0));
}
break;
case NumericTypeID::kCF64:
{
*reinterpret_cast<std::complex<double>*>(bytes.data()) = std::complex<double>(double(src), double(0));
}
break;
default:
return false;
}
return true;
}
/// Casts from a double to the destination type. Returns true if successful.
bool cast_from_double(std::vector<uint8_t> &bytes, NumericTypeID type, double src) {
int size_bytes = sizeof_bits(type) / 8;
if (!size_bytes) {
return false;
}
bytes.resize(size_bytes, 0);
switch (type) {
case NumericTypeID::kU8:
{
*reinterpret_cast<uint8_t *>(bytes.data()) = static_cast<uint8_t>(src);
}
break;
case NumericTypeID::kU16:
{
*reinterpret_cast<uint16_t *>(bytes.data()) = static_cast<uint16_t>(src);
}
break;
case NumericTypeID::kU32:
{
*reinterpret_cast<uint32_t *>(bytes.data()) = static_cast<uint32_t>(src);
}
break;
case NumericTypeID::kU64:
{
*reinterpret_cast<uint64_t *>(bytes.data()) = static_cast<uint64_t>(src);
}
break;
case NumericTypeID::kS8:
{
*reinterpret_cast<int8_t *>(bytes.data()) = static_cast<int8_t>(src);
}
break;
case NumericTypeID::kS16:
{
*reinterpret_cast<int16_t *>(bytes.data()) = static_cast<int16_t>(src);
}
break;
case NumericTypeID::kS32:
{
*reinterpret_cast<int32_t *>(bytes.data()) = static_cast<int32_t>(src);
}
break;
case NumericTypeID::kS64:
{
*reinterpret_cast<int64_t *>(bytes.data()) = static_cast<int64_t>(src);
}
break;
case NumericTypeID::kFE4M3:
{
*reinterpret_cast<float_e4m3_t *>(bytes.data()) = static_cast<float_e4m3_t>(float(src));
}
break;
case NumericTypeID::kFE5M2:
{
*reinterpret_cast<float_e5m2_t *>(bytes.data()) = static_cast<float_e5m2_t>(float(src));
}
break;
case NumericTypeID::kF16:
{
*reinterpret_cast<half_t *>(bytes.data()) = static_cast<half_t>(float(src));
}
break;
case NumericTypeID::kBF16:
{
*reinterpret_cast<bfloat16_t *>(bytes.data()) = static_cast<bfloat16_t>(float(src));
}
break;
case NumericTypeID::kTF32:
{
*reinterpret_cast<tfloat32_t *>(bytes.data()) = static_cast<tfloat32_t>(float(src));
}
break;
case NumericTypeID::kF32:
{
*reinterpret_cast<float *>(bytes.data()) = static_cast<float>(src);
}
break;
case NumericTypeID::kF64:
{
*reinterpret_cast<double *>(bytes.data()) = src;
}
break;
case NumericTypeID::kCF16:
{
cutlass::complex<cutlass::half_t> *x = reinterpret_cast<cutlass::complex<half_t> *>(bytes.data());
x->real() = static_cast<half_t>(float(src));
x->imag() = static_cast<half_t>(float(0));
}
break;
case NumericTypeID::kCBF16:
{
cutlass::complex<cutlass::bfloat16_t> *x = reinterpret_cast<cutlass::complex<bfloat16_t> *>(bytes.data());
x->real() = static_cast<bfloat16_t>(bfloat16_t(src));
x->imag() = static_cast<bfloat16_t>(bfloat16_t(0));
}
break;
case NumericTypeID::kCF32:
{
*reinterpret_cast<cutlass::complex<float>*>(bytes.data()) = cutlass::complex<float>(float(src), float());
}
break;
case NumericTypeID::kCTF32:
{
*reinterpret_cast<cutlass::complex<tfloat32_t>*>(bytes.data()) = cutlass::complex<tfloat32_t>(tfloat32_t(src), tfloat32_t());
}
break;
case NumericTypeID::kCF64:
{
*reinterpret_cast<cutlass::complex<double>*>(bytes.data()) = cutlass::complex<double>(src, double());
}
break;
default:
return false;
}
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/tools/library/src/util.cu/0
|
{
"file_path": "cutlass/tools/library/src/util.cu",
"repo_id": "cutlass",
"token_count": 17804
}
| 64 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines the performance result structure reported by the CUTLASS Profiler
*/
#pragma once
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "cutlass/cutlass.h"
// CUTLASS Profiler includes
#include "enumerated_types.h"
// CUTLASS Library includes
#include "cutlass/library/library.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Performance result object
struct PerformanceResult {
/// Index of problem
size_t problem_index;
/// library::Provider
library::Provider provider;
/// Operation kind
library::OperationKind op_kind;
/// CUTLASS status result from kernels (success or failure)
// Note: Status reflects kernel execution only; it carries no information about verification
Status status;
/// Outcome of verification (worst case verification result)
Disposition disposition;
/// Outcome of verification (all verification results)
DispositionMap verification_map;
/// Operation name
std::string operation_name;
/// Stringified vector of argument values
std::vector<std::pair<std::string, std::string> > arguments;
/// Number of bytes read or written
int64_t bytes;
/// Number of DL flops performed by the math function
int64_t flops;
/// Average runtime in ms
double runtime;
//
// Members
//
/// Ctor
PerformanceResult():
problem_index(0),
provider(library::Provider::kInvalid),
op_kind(library::OperationKind::kInvalid),
status(Status::kInvalid),
disposition(Disposition::kNotRun),
bytes(0),
flops(0),
runtime(0)
{ }
/// Returns true if the runtime is valid
bool good() const {
return runtime > 0;
}
/// Math throughput in units of GFLOP/s
double gflops_per_sec() const {
return double(flops) / runtime / 1.0e6;
}
/// Memory bandwidth in units of GiB/s
double gbytes_per_sec() const {
return double(bytes) / double(1 << 30) / runtime * 1000.0;
}
};
using PerformanceResultVector = std::vector<PerformanceResult>;
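// Example (illustrative): for a real-valued GEMM of size (M, N, K) a profiler typically
// records flops = 2*M*N*K (one multiply and one add per MAC) and runtime in milliseconds,
// so gflops_per_sec() reports GFLOP/s. The values below are hypothetical.
//
//   PerformanceResult result;
//   result.flops = 2ll * 4096 * 4096 * 4096;    // ~137.4 GFLOP
//   result.bytes = 3ll * 4096 * 4096 * 2;       // fp16 A, B and D operands
//   result.runtime = 1.25;                      // average kernel time in ms
//   double gflops = result.gflops_per_sec();    // ~110,000 GFLOP/s (~110 TFLOP/s)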
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
|
cutlass/tools/profiler/include/cutlass/profiler/performance_result.h/0
|
{
"file_path": "cutlass/tools/profiler/include/cutlass/profiler/performance_result.h",
"repo_id": "cutlass",
"token_count": 1063
}
| 65 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Execution environment: GEMM operation profiler
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include <algorithm>
#include <vector>
#include "cutlass/core_io.h"
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/gemm_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/library.h"
#include "cutlass/library/handle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
GemmOperationProfiler::GemmOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kGemm,
{
{ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (universal, gemm, planar_complex, planar_complex_array)"},
{ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D output"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "Variant of split K mode(serial, parallel)"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of GEMMs computed in one batch"},
{ArgumentTypeID::kEnumerated, {"raster_order", "raster-order"}, "Raster order (heuristic, along_n, along_m)"},
},
{ library::Provider::kCUBLAS}
) {
description_ = " General matrix-matrix product. D = alpha * A*B + beta * C";
}
/// Destructor
GemmOperationProfiler::~GemmOperationProfiler() {
}
/// Prints usage statement for the GEMM function
void GemmOperationProfiler::print_usage(std::ostream &out) const {
out << "GEMM" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void GemmOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size:\n"
<< " $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=Gemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=Gemm --accumulator-type=f16,f32\n\n"
<< "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row\n\n"
<< "Profile a particular problem size with split K and parallel reduction:\n"
<< " $ cutlass_profiler --operation=Gemm --split_k_mode=parallel --split_k_slices=2 --m=1024 --n=1024 --k=128\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=Gemm --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=Gemm --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=Gemm \\ \n"
<< " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status GemmOperationProfiler::GemmProblem::parse(
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
this->mode = library::GemmUniversalMode::kGemm;
if (!arg_as_int(this->m, "m", problem_space, problem)) {
// default value
this->m = 1024;
}
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_SplitKModeID(this->split_k_mode, "split_k_mode", problem_space, problem)) {
// default value
this->split_k_mode = library::SplitKMode::kSerial;
}
this->mode = library::GemmUniversalMode::kGemm;
if (this->split_k_mode == library::SplitKMode::kParallel) {
this->mode = library::GemmUniversalMode::kGemmSplitKParallel;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
} else if (this->batch_count > 1) {
this->mode = library::GemmUniversalMode::kBatched;
}
if (!arg_as_RasterOrder(this->raster_order, "raster_order", problem_space, problem)) {
// default value
this->raster_order = library::RasterOrder::kHeuristic;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->m), int(this->k)}).front();
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->k), int(this->n)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->m), int(this->n)}).front();
return Status::kSuccess;
}
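// Illustrative note (added; not part of the original source): get_packed_layout() yields the
// leading dimension of a densely packed tensor, so a column-major m-by-k A operand gives
// lda == m while a row-major A gives lda == k.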
/// Total number of bytes read or written
int64_t GemmOperationProfiler::GemmProblem::bytes(library::GemmDescription const &operation_desc) const {
// Input bytes read and Output bytes written for the gemm problem
int64_t bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * m / 8) * k +
int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) * k +
int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
}
bytes *= batch_count;
return bytes;
}
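// Illustrative arithmetic (added; not part of the original source): for m = n = k = 1024 with
// fp16 A/B/C, beta == 0, and batch_count == 1, this evaluates to
// 2 MiB (A) + 2 MiB (B) + 2 MiB (D written) = 6 MiB.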
/// Total number of flops computed
int64_t GemmOperationProfiler::GemmProblem::flops(library::GemmDescription const &operation_desc) const {
int64_t flops_ = (int64_t(m) * n * k + m * n) * 2 * batch_count;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddGaussianComplex:
flops_ *= 3;
break;
default: break;
}
return flops_;
}
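// Illustrative arithmetic (added; not part of the original source): for a real-valued GEMM with
// m = n = k = 1024 and batch_count == 1, flops_ = 2 * (1024^3 + 1024^2), roughly 2.15 GFLOP,
// before any complex-valued scaling is applied.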
/// Initializes a performance result
void GemmOperationProfiler::GemmProblem::initialize_result(
PerformanceResult &result,
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "D", problem_space,
std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_mode", problem_space, library::to_string(split_k_mode));
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "raster_order", problem_space, library::to_string(raster_order));
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status GemmOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::GemmDescription const &operation_desc =
static_cast<library::GemmDescription const &>(operation->description());
if (operation_desc.gemm_kind != library::GemmKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
gemm_workspace_.configuration.mode = problem_.mode;
gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
gemm_workspace_.configuration.lda = problem_.lda;
gemm_workspace_.configuration.ldb = problem_.ldb;
gemm_workspace_.configuration.ldc = problem_.ldc;
gemm_workspace_.configuration.ldd = problem_.ldc;
if (problem_.mode == library::GemmUniversalMode::kBatched) {
gemm_workspace_.configuration.batch_count = problem_.batch_count;
}
else {
gemm_workspace_.configuration.batch_count = problem_.split_k_slices;
}
gemm_workspace_.arguments.A = nullptr;
gemm_workspace_.arguments.B = nullptr;
gemm_workspace_.arguments.C = nullptr;
gemm_workspace_.arguments.D = nullptr;
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
gemm_workspace_.arguments.raster_order = problem_.raster_order;
// initialize the reduction operation for parallel split-K mode
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
if (!initialize_reduction_configuration_(operation, problem)) {
return Status::kErrorInternal;
}
}
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments);
}
/// Initializes the performance result
void GemmOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
}
/// Initialize reduction problem dimensions and library::Operation
bool GemmOperationProfiler::initialize_reduction_configuration_(
library::Operation const *operation,
ProblemSpace::Problem const &problem) {
library::GemmDescription const &gemm_desc =
static_cast<library::GemmDescription const&>(operation->description());
if (!cast_from_double(problem_.alpha_one, gemm_desc.element_epilogue, 1)) {
return false;
}
if (!cast_from_double(problem_.beta_zero, gemm_desc.element_epilogue, 0)) {
return false;
}
/// initialize library::ReductionConfiguration
gemm_workspace_.reduction_configuration.problem_size = gemm::GemmCoord(int(problem_.n), int(problem_.m), int(problem_.k)).mn();
gemm_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices);
gemm_workspace_.reduction_configuration.partition_stride = gemm::GemmCoord(int(problem_.n), int(problem_.m), int(problem_.k)).mn().product();
gemm_workspace_.reduction_configuration.ldw = problem_.ldc;
gemm_workspace_.reduction_configuration.lds = problem_.ldc;
gemm_workspace_.reduction_configuration.ldd = problem_.ldc;
// find reduction operation
library::ReductionFunctionalKey reduction_key(
library::Provider::kCUTLASS,
gemm_desc.tile_description.math_instruction.element_accumulator, // element workspace
gemm_desc.tile_description.math_instruction.element_accumulator, // element accumulator
gemm_desc.D.element, // element output
gemm_desc.element_epilogue // element compute
);
auto reduction_it = library::Singleton::get().operation_table.reduction_operations.find(reduction_key);
if (reduction_it == library::Singleton::get().operation_table.reduction_operations.end()) {
return false;
}
// initialize reduction operation required for parallel split-k operator
reduction_op_ = reduction_it->second;
// reduction operation found and initialized
return true;
}
/// Initializes workspace
Status GemmOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::Operation const* underlying_operation = operation;
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
library::GemmDescription const &operation_desc =
static_cast<library::GemmDescription const &>(operation->description());
// Compute the number of copies of the problem to rotate through so repeated profiling runs do not hit in the L2 cache.
if (!options.profiling.workspace_count) {
int64_t bytes = problem_.bytes(operation_desc);
if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) {
gemm_workspace_.problem_count =
1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes);
}
else {
gemm_workspace_.problem_count = 1;
}
}
else {
gemm_workspace_.problem_count = options.profiling.workspace_count;
}
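// Illustrative arithmetic (added; not part of the original source): with a hypothetical 40 MiB L2
// and a 6 MiB problem, problem_count = 1 + (3 * 40 MiB) / (6 MiB) = 21 rotating copies.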
bool allocate_device_tensors = options.execution_mode != ExecutionMode::kDryRun;
if (allocate_device_tensors) {
int seed_shift = 0;
gemm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.m), int(problem_.k)},
{int(problem_.lda)},
problem_.batch_count * gemm_workspace_.problem_count,
seed_shift++
);
gemm_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.k), int(problem_.n)},
{int(problem_.ldb)},
problem_.batch_count * gemm_workspace_.problem_count,
seed_shift++
);
gemm_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
problem_.batch_count * gemm_workspace_.problem_count,
seed_shift++
);
gemm_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.D.element,
operation_desc.D.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
problem_.batch_count * gemm_workspace_.problem_count
);
gemm_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.D.element,
operation_desc.D.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
problem_.batch_count * gemm_workspace_.problem_count
);
}
if (options.execution_mode != ExecutionMode::kDryRun) {
// NOTE: the leading non-batch strides are duplicated here for 3.0 API kernels
gemm_workspace_.arguments.problem_size = {int(problem_.m), int(problem_.n), int(problem_.k)};
gemm_workspace_.arguments.batch_count = problem_.batch_count;
gemm_workspace_.arguments.lda = problem_.lda;
gemm_workspace_.arguments.ldb = problem_.ldb;
gemm_workspace_.arguments.ldc = problem_.ldc;
gemm_workspace_.arguments.ldd = problem_.ldc;
gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride();
gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride();
/* Query device SM count to pass onto the kernel as an argument, where needed */
gemm_workspace_.arguments.sm_count = options.device.properties.multiProcessorCount;
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = underlying_operation->get_host_workspace_size(&gemm_workspace_.configuration);
gemm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = underlying_operation->get_device_workspace_size(&gemm_workspace_.configuration,
&gemm_workspace_.arguments);
gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = underlying_operation->initialize(
&gemm_workspace_.configuration,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
if (status != Status::kSuccess) {
return status;
}
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
workspace_size = reduction_op_->get_host_workspace_size(&gemm_workspace_.reduction_configuration);
gemm_workspace_.reduction_host_workspace.resize(workspace_size, 0);
status = reduction_op_->initialize(
&gemm_workspace_.reduction_configuration,
gemm_workspace_.reduction_host_workspace.data(),
nullptr);
if (status != Status::kSuccess) {
return status;
}
}
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kGemm;
results_.back().disposition = Disposition::kNotRun;
for (auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool GemmOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride();
gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride();
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
gemm_workspace_.arguments.alpha = problem_.alpha_one.data();
gemm_workspace_.arguments.beta = problem_.beta_zero.data();
gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->data();
gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->data();
gemm_workspace_.reduction_arguments.alpha = problem_.alpha.data();
gemm_workspace_.reduction_arguments.beta = problem_.beta.data();
gemm_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
//
// Run the CUTLASS operation
//
// initialize gemm underlying operation to handle parallel reduction
library::Operation const * underlying_operation = operation;
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
results_.back().status = underlying_operation->run(
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// Run parallel reduction kernel for parallel split_k_mode
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
results_.back().status = reduction_op_->run(
&gemm_workspace_.reduction_arguments,
gemm_workspace_.reduction_host_workspace.data(),
nullptr);
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// The CUTLASS operation ran, but it has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & gemm_desc = static_cast<library::GemmDescription const &>(operation->description());
if (cublas_satisfies(gemm_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
library::GemmDescription const &gemm_desc =
static_cast<library::GemmDescription const &>(operation->description());
cutlass::library::NumericTypeID element_A = gemm_desc.A.element;
cutlass::library::NumericTypeID element_B = gemm_desc.B.element;
bool verification_status = verify_with_reference_(options, report, device_context, operation, problem_space, problem, element_A, element_B);
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for (auto &m : results_.back().verification_map) {
if (m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if (!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if (is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// if verification.required is set, then return success iff at least one ref-check was run
if (options.verification.required) {
bool did_any_verification_run = false;
for (auto provider : options.verification.providers) {
did_any_verification_run |= (Disposition::kNotRun != results_.back().verification_map[provider]);
}
if (not did_any_verification_run) {
results_.back().status = Status::kErrorNotSupported;
return false;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool GemmOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::GemmDescription const &gemm_desc =
static_cast<library::GemmDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = get_cutlass_disposition(status);
return true;
}
std::vector<cublasGemmAlgo_t> algorithms;
detail::select_cublas_algorithms(
algorithms,
options,
gemm_desc);
if (algorithms.empty()) {
// no algorithm selected
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublasGemmEx()
//
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.C = gemm_workspace_.Reference->data();
gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.Reference->batch_stride();
gemm_workspace_.arguments.D = gemm_workspace_.Reference->data();
gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Reference->batch_stride();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasGemmExDispatcher gemm_op(
gemm_desc,
gemm_workspace_.configuration,
gemm_workspace_.arguments,
algorithms.front()
);
if (gemm_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = gemm_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = get_cutlass_disposition(status);
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*gemm_workspace_.Computed,
*gemm_workspace_.Reference,
gemm_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
gemm_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against host and device references
bool GemmOperationProfiler::verify_with_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem,
cutlass::library::NumericTypeID element_A,
cutlass::library::NumericTypeID element_B)
{
library::GemmDescription const &gemm_desc =
static_cast<library::GemmDescription const &>(operation->description());
//
// Initialize state
//
for (auto provider : options.verification.providers) {
// Skip providers that are not enabled
if (!options.verification.provider_enabled(provider)) {
continue;
}
void *ptr_A = gemm_workspace_.A->data();
void *ptr_B = gemm_workspace_.B->data();
void *ptr_C = gemm_workspace_.C->data();
void *ptr_D = gemm_workspace_.Reference->data();
// To support the host-side reference, conditionally allocate and
// copy tensors to host memory.
std::vector<uint8_t> host_data_A;
std::vector<uint8_t> host_data_B;
std::vector<uint8_t> host_data_C;
std::vector<uint8_t> host_data_D;
if (provider == library::Provider::kReferenceHost) {
host_data_A.resize(gemm_workspace_.A->bytes());
ptr_A = host_data_A.data();
gemm_workspace_.A->copy_to_host(ptr_A);
host_data_B.resize(gemm_workspace_.B->bytes());
ptr_B = host_data_B.data();
gemm_workspace_.B->copy_to_host(ptr_B);
host_data_C.resize(gemm_workspace_.C->bytes());
ptr_C = host_data_C.data();
gemm_workspace_.C->copy_to_host(ptr_C);
host_data_D.resize(gemm_workspace_.Reference->bytes());
ptr_D = host_data_D.data();
}
//
// Launch
//
library::Handle handle;
handle.set_provider(provider);
Status status = handle.gemm_universal(
problem_.mode,
gemm_workspace_.configuration.problem_size.m(),
gemm_workspace_.configuration.problem_size.n(),
gemm_workspace_.configuration.problem_size.k(),
gemm_desc.tile_description.math_instruction.element_accumulator,
gemm_desc.element_epilogue,
problem_.alpha.data(),
element_A,
gemm_desc.A.layout,
gemm_desc.transform_A,
ptr_A,
int(gemm_workspace_.configuration.lda),
element_B,
gemm_desc.B.layout,
gemm_desc.transform_B,
ptr_B,
int(gemm_workspace_.configuration.ldb),
problem_.beta.data(),
gemm_desc.C.element,
gemm_desc.C.layout,
ptr_C,
int(gemm_workspace_.configuration.ldc),
gemm_desc.D.element,
gemm_desc.D.layout,
ptr_D,
int(gemm_workspace_.configuration.ldd),
gemm_workspace_.configuration.batch_count,
gemm_workspace_.A->batch_stride(),
gemm_workspace_.B->batch_stride(),
gemm_workspace_.C->batch_stride(),
gemm_workspace_.Reference->batch_stride());
if (status != Status::kSuccess) {
results_.back().verification_map[provider] = Disposition::kNotRun;
continue;
}
results_.back().status = status;
if (provider == library::Provider::kReferenceHost) {
gemm_workspace_.Reference->copy_from_host(ptr_D);
}
//
// Verify results
//
results_.back().verification_map[provider] = compare_tensors(
options,
*gemm_workspace_.Computed,
*gemm_workspace_.Reference,
gemm_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[provider] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
gemm_desc,
library::Provider::kCUTLASS,
provider);
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool GemmOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride();
gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride();
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
gemm_workspace_.arguments.alpha = problem_.alpha_one.data();
gemm_workspace_.arguments.beta = problem_.beta_zero.data();
gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->data();
gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->data();
gemm_workspace_.reduction_arguments.alpha = problem_.alpha.data();
gemm_workspace_.reduction_arguments.beta = problem_.beta.data();
gemm_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Method to profile a CUTLASS Operation
Status GemmOperationProfiler::profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace) {
GpuTimer timer;
// initialize gemm underlying operation to handle parallel reduction
library::Operation const * underlying_operation = operation;
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
//
// Optional sleep to limit power consumption and thermals
//
sleep(options.profiling.sleep_duration);
//
// Warmup loop
//
Status status;
for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {
int problem_idx = (iteration % gemm_workspace_.problem_count) * problem_.batch_count;
gemm_workspace_.arguments.A = gemm_workspace_.A->batch_data(problem_idx);
gemm_workspace_.arguments.B = gemm_workspace_.B->batch_data(problem_idx);
gemm_workspace_.arguments.C = gemm_workspace_.C->batch_data(problem_idx);
gemm_workspace_.arguments.D = gemm_workspace_.Computed->batch_data(problem_idx);
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->batch_data(problem_idx);
gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->batch_data(problem_idx);
}
// Execute the CUTLASS operation
status = underlying_operation->run(
&gemm_workspace_.arguments,
host_workspace,
device_workspace);
if (status != Status::kSuccess) {
return status;
}
// Run parallel reduction kernel for parallel split_k_mode
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
status = reduction_op_->run(
&gemm_workspace_.reduction_arguments,
gemm_workspace_.reduction_host_workspace.data(),
nullptr);
if (status != Status::kSuccess) {
return status;
}
}
}
//
// Initialize GPU timer
//
timer.start();
//
// Profiling loop
//
int Iterations = options.profiling.iterations;
int iteration = 0;
for (; iteration < Iterations; ++iteration) {
// Iterate over copies of the problem in memory
int workspace_idx = options.profiling.warmup_iterations + iteration;
int problem_idx = (workspace_idx % gemm_workspace_.problem_count) * problem_.batch_count;
gemm_workspace_.arguments.A = gemm_workspace_.A->batch_data(problem_idx);
gemm_workspace_.arguments.B = gemm_workspace_.B->batch_data(problem_idx);
gemm_workspace_.arguments.C = gemm_workspace_.C->batch_data(problem_idx);
gemm_workspace_.arguments.D = gemm_workspace_.Computed->batch_data(problem_idx);
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->batch_data(problem_idx);
gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->batch_data(problem_idx);
}
status = underlying_operation->run(
arguments,
host_workspace,
device_workspace);
if (status != Status::kSuccess) {
return status;
}
// Run parallel reduction kernel for parallel split_k_mode
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
status = reduction_op_->run(
&gemm_workspace_.reduction_arguments,
gemm_workspace_.reduction_host_workspace.data(),
nullptr);
if (status != Status::kSuccess) {
return status;
}
}
}
//
// Wait for completion
//
timer.stop_and_wait();
//
// Update performance result
//
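// Note (added; assumes GpuTimer's interface): duration(iteration) is taken to return the
// average elapsed time per iteration in milliseconds, which is what PerformanceResult expects.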
runtime = timer.duration(iteration);
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/tools/profiler/src/gemm_operation_profiler.cu/0
|
{
"file_path": "cutlass/tools/profiler/src/gemm_operation_profiler.cu",
"repo_id": "cutlass",
"token_count": 16026
}
| 66 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cuda_runtime.h>
#include <cublas_v2.h>
//-- BLAM_DEBUG_OUT ---------------------------------------------------------
#ifdef BLAM_DEBUG
# include <iostream>
# ifndef BLAM_DEBUG_OUT
# define BLAM_DEBUG_OUT(msg) std::cerr << "BLAM: " << msg << std::endl
# define BLAM_DEBUG_OUT_2(msg) std::cerr << msg << std::endl
# endif // BLAM_DEBUG_OUT
#else
# ifndef BLAM_DEBUG_OUT
# define BLAM_DEBUG_OUT(msg)
# define BLAM_DEBUG_OUT_2(msg)
# endif // BLAM_DEBUG_OUT
#endif // BLAM_DEBUG
// User could potentially define ComplexFloat/ComplexDouble instead of the cuda::std:: defaults below
#ifndef BLAM_COMPLEX_TYPES
#define BLAM_COMPLEX_TYPES 1
#include <cuda/std/complex>
namespace blam {
template <typename T>
using Complex = cuda::std::complex<T>;
using ComplexFloat = cuda::std::complex<float>;
using ComplexDouble = cuda::std::complex<double>;
}
#endif // BLAM_COMPLEX_TYPES
// User could potentially define Half instead of cute::
#ifndef BLAM_HALF_TYPE
#define BLAM_HALF_TYPE 1
#include <cute/numeric/numeric_types.hpp>
namespace blam {
using Half = cute::half_t;
}
#endif // BLAM_HALF_TYPE
namespace blam
{
namespace cublas
{
inline const char*
cublas_get_error(cublasStatus_t status)
{
switch (status) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED -- The cuBLAS library was not initialized.";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED -- Resource allocation failed inside the cuBLAS library.";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE -- An unsupported value or parameter was passed to the function.";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH -- The function requires a feature absent from the device architecture.";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR -- An access to GPU memory space failed.";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED -- The GPU program failed to execute.";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR -- An internal cuBLAS operation failed.";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED -- The functionality requested is not supported.";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR -- An error was detected when checking the current licensing.";
default:
return "CUBLAS_ERROR -- <unknown>";
}
}
inline bool
cublas_is_error(cublasStatus_t status)
{
return status != CUBLAS_STATUS_SUCCESS;
}
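// Illustrative sketch (not part of the original header): a minimal checked-call helper built
// on the two functions above. It logs only when BLAM_DEBUG is defined and passes the status through.
inline cublasStatus_t
cublas_check(cublasStatus_t status)
{
  if (cublas_is_error(status)) {
    BLAM_DEBUG_OUT(cublas_get_error(status));
  }
  return status;
}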
// hgemm
inline cublasStatus_t
gemm(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const Half* alpha,
const Half* A, int ldA,
const Half* B, int ldB,
const Half* beta,
Half* C, int ldC)
{
BLAM_DEBUG_OUT("cublasHgemm");
return cublasGemmEx(handle, transA, transB,
m, n, k,
reinterpret_cast<const __half*>(alpha),
reinterpret_cast<const __half*>(A), CUDA_R_16F, ldA,
reinterpret_cast<const __half*>(B), CUDA_R_16F, ldB,
reinterpret_cast<const __half*>(beta),
reinterpret_cast< __half*>(C), CUDA_R_16F, ldC,
CUDA_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
// mixed hf gemm
inline cublasStatus_t
gemm(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const float* alpha,
const Half* A, int ldA,
const Half* B, int ldB,
const float* beta,
float* C, int ldC)
{
BLAM_DEBUG_OUT("cublasGemmEx mixed half-float");
return cublasGemmEx(handle, transA, transB,
m, n, k,
alpha,
reinterpret_cast<const __half*>(A), CUDA_R_16F, ldA,
reinterpret_cast<const __half*>(B), CUDA_R_16F, ldB,
beta,
C, CUDA_R_32F, ldC,
CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
// igemm
inline cublasStatus_t
gemm(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const int32_t* alpha,
const int8_t* A, int ldA,
const int8_t* B, int ldB,
const int32_t* beta,
int32_t* C, int ldC)
{
BLAM_DEBUG_OUT("cublasIgemm");
return cublasGemmEx(handle, transA, transB,
m, n, k,
alpha,
A, CUDA_R_8I, ldA,
B, CUDA_R_8I, ldB,
beta,
C, CUDA_R_32I, ldC,
CUDA_R_32I, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
// sgemm
inline cublasStatus_t
gemm(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const float* alpha,
const float* A, int ldA,
const float* B, int ldB,
const float* beta,
float* C, int ldC)
{
BLAM_DEBUG_OUT("cublasSgemm");
return cublasSgemm(handle, transA, transB,
m, n, k,
alpha,
A, ldA,
B, ldB,
beta,
C, ldC);
}
// dgemm
inline cublasStatus_t
gemm(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const double* alpha,
const double* A, int ldA,
const double* B, int ldB,
const double* beta,
double* C, int ldC)
{
BLAM_DEBUG_OUT("cublasDgemm");
return cublasDgemm(handle, transA, transB,
m, n, k,
alpha,
A, ldA,
B, ldB,
beta,
C, ldC);
}
// cgemm
inline cublasStatus_t
gemm(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const ComplexFloat* alpha,
const ComplexFloat* A, int ldA,
const ComplexFloat* B, int ldB,
const ComplexFloat* beta,
ComplexFloat* C, int ldC)
{
BLAM_DEBUG_OUT("cublasCgemm");
return cublasCgemm(handle, transA, transB,
m, n, k,
reinterpret_cast<const cuFloatComplex*>(alpha),
reinterpret_cast<const cuFloatComplex*>(A), ldA,
reinterpret_cast<const cuFloatComplex*>(B), ldB,
reinterpret_cast<const cuFloatComplex*>(beta),
reinterpret_cast<cuFloatComplex*>(C), ldC);
}
// zgemm
inline cublasStatus_t
gemm(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const ComplexDouble* alpha,
const ComplexDouble* A, int ldA,
const ComplexDouble* B, int ldB,
const ComplexDouble* beta,
ComplexDouble* C, int ldC)
{
BLAM_DEBUG_OUT("cublasZgemm");
return cublasZgemm(handle, transA, transB,
m, n, k,
reinterpret_cast<const cuDoubleComplex*>(alpha),
reinterpret_cast<const cuDoubleComplex*>(A), ldA,
reinterpret_cast<const cuDoubleComplex*>(B), ldB,
reinterpret_cast<const cuDoubleComplex*>(beta),
reinterpret_cast<cuDoubleComplex*>(C), ldC);
}
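// Comment added for orientation: the gemm_batch overloads below come in two flavors,
// strided-batched variants that take the stride (loA/loB/loC) between consecutive matrices,
// and pointer-array variants that take arrays of per-matrix pointers.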
// hgemm
inline cublasStatus_t
gemm_batch(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const Half* alpha,
const Half* A, int ldA, int loA,
const Half* B, int ldB, int loB,
const Half* beta,
Half* C, int ldC, int loC,
int batch_size)
{
BLAM_DEBUG_OUT("cublasHgemmStridedBatched");
return cublasHgemmStridedBatched(handle, transA, transB,
m, n, k,
reinterpret_cast<const __half*>(alpha),
reinterpret_cast<const __half*>(A), ldA, loA,
reinterpret_cast<const __half*>(B), ldB, loB,
reinterpret_cast<const __half*>(beta),
reinterpret_cast<__half*>(C), ldC, loC,
batch_size);
}
// sgemm
inline cublasStatus_t
gemm_batch(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const float* alpha,
const float* A, int ldA, int loA,
const float* B, int ldB, int loB,
const float* beta,
float* C, int ldC, int loC,
int batch_size)
{
BLAM_DEBUG_OUT("cublasSgemmStridedBatched");
return cublasSgemmStridedBatched(handle, transA, transB,
m, n, k,
alpha,
A, ldA, loA,
B, ldB, loB,
beta,
C, ldC, loC,
batch_size);
}
// dgemm
inline cublasStatus_t
gemm_batch(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const double* alpha,
const double* A, int ldA, int loA,
const double* B, int ldB, int loB,
const double* beta,
double* C, int ldC, int loC,
int batch_size)
{
BLAM_DEBUG_OUT("cublasDgemmStridedBatched");
return cublasDgemmStridedBatched(handle, transA, transB,
m, n, k,
alpha,
A, ldA, loA,
B, ldB, loB,
beta,
C, ldC, loC,
batch_size);
}
// cgemm
inline cublasStatus_t
gemm_batch(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const ComplexFloat* alpha,
const ComplexFloat* A, int ldA, int loA,
const ComplexFloat* B, int ldB, int loB,
const ComplexFloat* beta,
ComplexFloat* C, int ldC, int loC,
int batch_size)
{
BLAM_DEBUG_OUT("cublasCgemmStridedBatched");
return cublasCgemmStridedBatched(handle, transA, transB,
m, n, k,
reinterpret_cast<const cuFloatComplex*>(alpha),
reinterpret_cast<const cuFloatComplex*>(A), ldA, loA,
reinterpret_cast<const cuFloatComplex*>(B), ldB, loB,
reinterpret_cast<const cuFloatComplex*>(beta),
reinterpret_cast<cuFloatComplex*>(C), ldC, loC,
batch_size);
}
// zgemm
inline cublasStatus_t
gemm_batch(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const ComplexDouble* alpha,
const ComplexDouble* A, int ldA, int loA,
const ComplexDouble* B, int ldB, int loB,
const ComplexDouble* beta,
ComplexDouble* C, int ldC, int loC,
int batch_size)
{
BLAM_DEBUG_OUT("cublasZgemmStridedBatched");
return cublasZgemmStridedBatched(handle, transA, transB,
m, n, k,
reinterpret_cast<const cuDoubleComplex*>(alpha),
reinterpret_cast<const cuDoubleComplex*>(A), ldA, loA,
reinterpret_cast<const cuDoubleComplex*>(B), ldB, loB,
reinterpret_cast<const cuDoubleComplex*>(beta),
reinterpret_cast<cuDoubleComplex*>(C), ldC, loC,
batch_size);
}
// hgemm
inline cublasStatus_t
gemm_batch(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const Half* alpha,
const Half* const A[], int ldA,
const Half* const B[], int ldB,
const Half* beta,
Half* const C[], int ldC,
int batch_size)
{
BLAM_DEBUG_OUT("cublasHgemmBatched");
return cublasHgemmBatched(handle, transA, transB,
m, n, k,
reinterpret_cast<const __half*>(alpha),
reinterpret_cast<const __half**>(const_cast<const Half**>(A)), ldA,
// A, ldA, // cuBLAS 9.2
reinterpret_cast<const __half**>(const_cast<const Half**>(B)), ldB,
// B, ldB, // cuBLAS 9.2
reinterpret_cast<const __half*>(beta),
reinterpret_cast<__half**>(const_cast<Half**>(C)), ldC,
// C, ldC, // cuBLAS 9.2
batch_size);
}
// sgemm
inline cublasStatus_t
gemm_batch(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const float* alpha,
const float* const A[], int ldA,
const float* const B[], int ldB,
const float* beta,
float* const C[], int ldC,
int batch_size)
{
BLAM_DEBUG_OUT("cublasSgemmBatched");
return cublasSgemmBatched(handle, transA, transB,
m, n, k,
alpha,
const_cast<const float**>(A), ldA,
// A, ldA, // cuBLAS 9.2
const_cast<const float**>(B), ldB,
// B, ldB, // cuBLAS 9.2
beta,
const_cast<float**>(C), ldC,
// C, ldC, // cuBLAS 9.2
batch_size);
}
// dgemm
inline cublasStatus_t
gemm_batch(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const double* alpha,
const double* const A[], int ldA,
const double* const B[], int ldB,
const double* beta,
double* const C[], int ldC,
int batch_size)
{
BLAM_DEBUG_OUT("cublasDgemmBatched");
return cublasDgemmBatched(handle, transA, transB,
m, n, k,
alpha,
const_cast<const double**>(A), ldA,
// A, ldA, // cuBLAS 9.2
const_cast<const double**>(B), ldB,
// B, ldB, // cuBLAS 9.2
beta,
const_cast<double**>(C), ldC,
// C, ldC, // cuBLAS 9.2
batch_size);
}
// cgemm
inline cublasStatus_t
gemm_batch(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const ComplexFloat* alpha,
const ComplexFloat* const A[], int ldA,
const ComplexFloat* const B[], int ldB,
const ComplexFloat* beta,
ComplexFloat* const C[], int ldC,
int batch_size)
{
BLAM_DEBUG_OUT("cublasCgemmBatched");
return cublasCgemmBatched(handle, transA, transB,
m, n, k,
reinterpret_cast<const cuFloatComplex*>(alpha),
const_cast<const cuFloatComplex**>(reinterpret_cast<const cuFloatComplex* const *>(A)), ldA,
//reinterpret_cast<const cuFloatComplex* const *>(A), ldA, // cuBLAS 9.2
const_cast<const cuFloatComplex**>(reinterpret_cast<const cuFloatComplex* const *>(B)), ldB,
//reinterpret_cast<const cuFloatComplex* const *>(B), ldB, // cuBLAS 9.2
reinterpret_cast<const cuFloatComplex*>(beta),
const_cast<cuFloatComplex**>(reinterpret_cast<cuFloatComplex* const *>(C)), ldC,
//reinterpret_cast<cuFloatComplex* const *>(C), ldC, // cuBLAS 9.2
batch_size);
}
// zgemm
inline cublasStatus_t
gemm_batch(cublasHandle_t handle,
cublasOperation_t transA, cublasOperation_t transB,
int m, int n, int k,
const ComplexDouble* alpha,
const ComplexDouble* const A[], int ldA,
const ComplexDouble* const B[], int ldB,
const ComplexDouble* beta,
ComplexDouble* const C[], int ldC,
int batch_size)
{
BLAM_DEBUG_OUT("cublasZgemmBatched");
return cublasZgemmBatched(handle, transA, transB,
m, n, k,
reinterpret_cast<const cuDoubleComplex*>(alpha),
const_cast<const cuDoubleComplex**>(reinterpret_cast<const cuDoubleComplex* const *>(A)), ldA,
//reinterpret_cast<const cuDoubleComplex* const *>(A), ldA, // cuBLAS 9.2
const_cast<const cuDoubleComplex**>(reinterpret_cast<const cuDoubleComplex* const *>(B)), ldB,
//reinterpret_cast<const cuDoubleComplex* const *>(B), ldB, // cuBLAS 9.2
reinterpret_cast<const cuDoubleComplex*>(beta),
const_cast<cuDoubleComplex**>(reinterpret_cast<cuDoubleComplex* const *>(C)), ldC,
//reinterpret_cast<cuDoubleComplex* const *>(C), ldC, // cuBLAS 9.2
batch_size);
}
} // end namespace cublas
} // end namespace blam
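// Illustrative usage sketch (not part of the original header), assuming column-major device
// buffers d_A (m x k), d_B (k x n), and d_C (m x n) already exist:
//
//   cublasHandle_t handle;
//   cublasCreate(&handle);
//   float alpha = 1.0f, beta = 0.0f;
//   blam::cublas::gemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
//                      m, n, k, &alpha, d_A, m, d_B, k, &beta, d_C, m);
//   cublasDestroy(handle);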
|
cutlass/tools/util/include/cutlass/util/cublas_wrappers.hpp/0
|
{
"file_path": "cutlass/tools/util/include/cutlass/util/cublas_wrappers.hpp",
"repo_id": "cutlass",
"token_count": 10297
}
| 67 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief reorder data from the host side
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/tensor_view.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/gemm.h"
namespace cutlass {
/// This is needed for the interleaved integer tensor core kernels. The purpose
/// is to skip the shared memory part in the epilogue.
template <int Interleaved, typename Element, typename Layout>
void reorder_column(TensorRef<Element, Layout> dest,
TensorRef<Element, Layout> src,
cutlass::gemm::GemmCoord problem_size) {
const int InstructionShapeCol = 8;
// 4 threads per Quad
const int ElementsPerThread = InstructionShapeCol / 4;
// 4 threads per Quad
const int ReorderedElementsPerThread =
Interleaved / 4;
for (int n = 0; n < problem_size.n(); n++) {
for (int k = 0; k < problem_size.k(); k++) {
dest.at({k, (n / Interleaved) * Interleaved +
((n % ReorderedElementsPerThread) / ElementsPerThread) *
InstructionShapeCol +
((n % Interleaved) / ReorderedElementsPerThread) *
ElementsPerThread +
(n % ElementsPerThread)}) = src.at({k, n});
}
}
}
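// A minimal host-side usage sketch of reorder_column (illustrative only; the
// HostTensor objects, the int8_t element type, and the interleaving factor of 32
// below are assumptions, not taken from this file):
//
//   cutlass::HostTensor<int8_t, cutlass::layout::ColumnMajorInterleaved<32>> tensor_b, tensor_b_reordered;
//   // ... allocate both tensors with extent {K, N} and fill tensor_b ...
//   cutlass::reorder_column<32>(tensor_b_reordered.host_ref(), tensor_b.host_ref(),
//                               cutlass::gemm::GemmCoord(M, N, K));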
template <int ColumnInterleaved, int LayoutInterleaved = ColumnInterleaved, typename Element, typename Layout>
void reorder_convK(TensorRef<Element, Layout> dest,
TensorRef<Element, Layout> src,
cutlass::gemm::GemmCoord problem_size) {
TensorRef<Element, layout::RowMajorInterleaved<LayoutInterleaved>> mappedDest(dest.data(), dest.stride(0));
TensorRef<Element, layout::RowMajorInterleaved<LayoutInterleaved>> mappedSrc(src.data(), src.stride(0));
reorder_column<ColumnInterleaved>(
mappedDest, mappedSrc, problem_size);
}
/// This is needed for the sparse tensor core kernels. The purpose
/// is to use ldmatrix to load from shared memory to the register file.
template <typename Element, typename LayoutDest, typename LayoutSrc>
void reorder_meta(TensorRef<Element, LayoutDest> dest,
TensorRef<Element, LayoutSrc> src,
cutlass::gemm::GemmCoord problem_size) {
for (int m = 0; m < problem_size.m(); m++) {
for (int k = 0; k < problem_size.k(); k++) {
// First reorder the rows.
int group = (sizeof(Element) == 2) ? 32 : 16;
int interweave = (sizeof(Element) == 2) ? 4 : 2;
int dest_row = m / group * group + (m % 8) * interweave + (m % group) / 8;
int dest_col = k;
// Next swizzle the 2x2 blocks from Z to N.
if (((dest_row % 2) == 0) && ((dest_col % 2) == 1)) {
++dest_row;
--dest_col;
} else if (((dest_row % 2) == 1) && ((dest_col % 2) == 0)) {
--dest_row;
++dest_col;
}
dest.at({dest_row, dest_col}) = src.at({m, k});
}
}
}
} // namespace cutlass
|
cutlass/tools/util/include/cutlass/util/host_reorder.h/0
|
{
"file_path": "cutlass/tools/util/include/cutlass/util/host_reorder.h",
"repo_id": "cutlass",
"token_count": 1717
}
| 68 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/subbyte_reference.h"
#include "cutlass/fast_math.h"
namespace cutlass {
namespace reference {
namespace device {
namespace kernel {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines several helpers
namespace detail {
/// Helper to perform for-each operation
template <typename Func, int Rank, int RankRemaining>
struct TensorForEachHelper {
/// Constructor for general rank
__inline__ __device__
TensorForEachHelper(Func &func, Coord<Rank> const &size, Coord<Rank> &coord, int64_t index) {
int64_t product = 1;
CUTLASS_PRAGMA_UNROLL
for (int i = Rank - RankRemaining; i < Rank; ++i) {
product *= size[i];
}
coord[Rank - 1 - RankRemaining] = index / product;
int64_t remaining = index % product;
TensorForEachHelper<Func, Rank, RankRemaining-1>(func, size, coord, remaining);
}
};
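// Worked example of the decomposition above (illustrative): for Rank = 3,
// size = {2, 3, 4}, and RankRemaining = 2, product = size[1] * size[2] = 12,
// so coord[0] = index / 12; the remainder is then split by the next helper into
// coord[1] = remainder / 4 and, at the last level, coord[2] = the final remainder.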
/// Helper to perform for-each operation
template <typename Func, int Rank>
struct TensorForEachHelper<Func, Rank, 0> {
/// Constructor for fastest changing rank
__inline__ __device__
TensorForEachHelper(Func &func, Coord<Rank> const &size, Coord<Rank> &coord, int64_t index) {
coord[Rank - 1] = index;
if (coord < size) {
func(coord);
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel calls a functor for each element in a tensor's index space
template <typename Func, int Rank, typename Params>
__global__ void TensorForEach(Coord<Rank> size, Params params = Params()) {
Func func(params);
int64_t index = threadIdx.x + blockIdx.x * blockDim.x;
int64_t max_index = 1;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
max_index *= size[i];
}
CUTLASS_PRAGMA_NO_UNROLL
while (index < max_index) {
Coord<Rank> coord;
detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, size, coord, index);
index += blockDim.x * gridDim.x;
}
}
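// A minimal launch sketch for the kernel above (illustrative; the functor, grid,
// and block dimensions are assumptions supplied by the caller):
//
//   struct Scale2x {
//     struct Params { float *ptr; int ldm; };
//     Params params;
//     __device__ Scale2x(Params p): params(p) { }
//     __device__ void operator()(cutlass::Coord<2> coord) {
//       // scale element (row, column) of a row-major matrix in place
//       params.ptr[coord[0] * params.ldm + coord[1]] *= 2.0f;
//     }
//   };
//
//   TensorForEach<Scale2x, 2, Scale2x::Params><<<grid, block>>>(
//       cutlass::make_Coord(rows, cols), Scale2x::Params{ptr, cols});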
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel calls a functor for each element along a tensor's diagonal
template <typename Func, int Rank, typename Params>
__global__ void TensorDiagonalForEach(Coord<Rank> size, Params params, int start, int end) {
Func func(params);
int64_t index = threadIdx.x + blockIdx.x * blockDim.x + start;
if (index < end) {
Coord<Rank> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
coord[i] = index;
}
func(coord);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Func>
__global__ void BlockForEach(
Element *ptr,
size_t capacity,
typename Func::Params params) {
Func func(params);
size_t index = threadIdx.x + blockIdx.x * blockDim.x;
for (; index < capacity; index += blockDim.x * gridDim.x) {
ReferenceFactory<Element>::get(ptr, index) = func();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace device
} // namespace reference
} // namespace cutlass
|
cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_foreach.h/0
|
{
"file_path": "cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_foreach.h",
"repo_id": "cutlass",
"token_count": 1521
}
| 69 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for complex-valued Rank 2K update in host-side code.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include <assert.h>
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a rank-2k update among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ConvertOp = NumericConverter<ElementC, ScalarType>,
typename InnerProductOp = multiply_add<ComputeType>
>
void Rank2KComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRef<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum,
FillMode fill_mode_c,
BlasMode blas_mode,
int batch_count = 1,
int64_t batch_stride_A = 0,
int64_t batch_stride_B = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_D = 0) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
  // Note: batches are handled by advancing the tensor pointers by the batch strides
  // at the end of each iteration of the batch loop below.
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
// Rank2K update operates on A=NxK, B=NxK, and C=NxN
assert(M==N);
// Blocking necessary to speedup reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) {
// Compute matrix product using blocks
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N &&
( (fill_mode_c == FillMode::kLower && row >= col) ||
(fill_mode_c == FillMode::kUpper && row <= col) )
) {
// A x B^T (Symmetric) or A x B^H (Hermitian)
              // complex conjugation on operand B (b_t) is a function of the blas3 computation
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementB b_t = (blas_mode == BlasMode::kHermitian) ?
conj(tensor_b.at(MatrixCoord(col, k_block))) :
tensor_b.at(MatrixCoord(col, k_block));
ComputeType a_ik = ComputeType(a);
ComputeType b_jk = ComputeType(b_t);
// complex conjugation is a function of operand layouts
if (transform_a == ComplexTransform::kConjugate) {
a_ik = conj(a_ik);
}
// complex conjugation is a function of operand layouts
if (transform_b == ComplexTransform::kConjugate) {
b_jk = conj(b_jk);
}
accum[i][j] = inner_product_op(a_ik, b_jk, accum[i][j]);
}
}
}
}
      /* HER2K needs two epilogues to handle a complex alpha value */
if ( blas_mode == BlasMode::kHermitian ) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N &&
((fill_mode_c == FillMode::kLower && row >= col) ||
(fill_mode_c == FillMode::kUpper && row <= col))
) {
ScalarType c = tensor_c.at(coord);
            // The imaginary parts of the diagonal elements of a Hermitian matrix
            // are assumed to be zero, so they are explicitly set to zero here
if (blas_mode == BlasMode::kHermitian) {
c = (row == col) ? real(c) : c;
}
tensor_d.at(coord) = convert_op(alpha *
ScalarType(accum[i][j]) +
beta * c);
}
}
}
      /* Zeroing out accum for second HERK */
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N &&
( (fill_mode_c == FillMode::kLower && row >= col) ||
(fill_mode_c == FillMode::kUpper && row <= col) )
) {
// B x A^T (Symmetric) or B x A^H (Hermitian)
              // complex conjugation on operand A (a_t) is a function of the blas3 computation
ElementB b = tensor_b.at(MatrixCoord(row, k_block));
ElementA a_t = (blas_mode == BlasMode::kHermitian) ?
conj(tensor_a.at(MatrixCoord(col, k_block))):
tensor_a.at(MatrixCoord(col, k_block));
ComputeType b_ik = ComputeType(b);
ComputeType a_jk = ComputeType(a_t);
// complex conjugation here is a function of operand layouts
if (transform_b == ComplexTransform::kConjugate) {
b_ik = conj(b_ik);
}
// complex conjugation here is a function of operand layouts
if (transform_a == ComplexTransform::kConjugate) {
a_jk = conj(a_jk);
}
accum[i][j] = inner_product_op(b_ik, a_jk, accum[i][j]);
}
}
}
}
ScalarType alpha_hermitian = (blas_mode == BlasMode::kHermitian) ?
conj(alpha) : alpha;
ScalarType beta_hermitian = (blas_mode == BlasMode::kHermitian) ?
1 : beta;
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N &&
((fill_mode_c == FillMode::kLower && row >= col) ||
(fill_mode_c == FillMode::kUpper && row <= col))
) {
ScalarType d = (blas_mode == BlasMode::kHermitian) ?
tensor_d.at(coord) : tensor_c.at(coord);
ScalarType tmp_d = convert_op(
alpha_hermitian * ScalarType(accum[i][j]) +
beta_hermitian * d);
if (blas_mode == BlasMode::kHermitian && row == col ) {
tensor_d.at(coord) = real(tmp_d);
} else {
tensor_d.at(coord) = tmp_d;
}
}
}
}
} // for (col_block)
} // for (row_block)
tensor_a.add_pointer_offset(batch_stride_A);
tensor_b.add_pointer_offset(batch_stride_B);
tensor_c.add_pointer_offset(batch_stride_C);
tensor_d.add_pointer_offset(batch_stride_D);
} // for (batch_idx)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a rank-2k update among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType
>
void Rank2KComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRef<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
FillMode fill_mode_c,
BlasMode blas_mode) {
Rank2KComplex(
problem_size, alpha,
tensor_a, transform_a,
tensor_b, transform_b,
beta, tensor_c, tensor_d,
ScalarType(0),
fill_mode_c,
blas_mode);
}
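// A minimal host-side usage sketch of the convenience overload above (illustrative
// only; the HostTensor objects and the problem dimensions are assumptions):
//
//   using Complex = cutlass::complex<float>;
//   cutlass::reference::host::Rank2KComplex(
//       cutlass::gemm::GemmCoord(N, N, K), Complex(1.0f),
//       tensor_a.host_ref(), cutlass::ComplexTransform::kNone,
//       tensor_b.host_ref(), cutlass::ComplexTransform::kNone,
//       Complex(0.0f), tensor_c.host_ref(), tensor_d.host_ref(),
//       cutlass::FillMode::kLower, cutlass::BlasMode::kHermitian);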
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
|
cutlass/tools/util/include/cutlass/util/reference/host/rank_2k_complex.h/0
|
{
"file_path": "cutlass/tools/util/include/cutlass/util/reference/host/rank_2k_complex.h",
"repo_id": "cutlass",
"token_count": 5115
}
| 70 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/core_io.h"
#include "cutlass/tensor_view.h"
#include "cutlass/tensor_view_planar_complex.h"
#include "cutlass/complex.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Helper to write the least significant rank of a TensorView
template <
typename Element,
typename Layout
>
inline std::ostream & TensorView_WriteLeastSignificantRank(
std::ostream& out,
TensorView<Element, Layout> const& view,
Coord<Layout::kRank> const &start_coord,
int rank,
std::streamsize width) {
for (int idx = 0; idx < view.extent(rank); ++idx) {
Coord<Layout::kRank> coord(start_coord);
coord[rank] = idx;
if (idx) {
out.width(0);
out << ", ";
}
if (idx || coord) {
out.width(width);
}
out << ScalarIO<Element>(view.at(coord));
}
return out;
}
/// Helper to write a rank of a TensorView
template <
typename Element,
typename Layout
>
inline std::ostream & TensorView_WriteRank(
std::ostream& out,
TensorView<Element, Layout> const& view,
Coord<Layout::kRank> const &start_coord,
int rank,
std::streamsize width) {
// If called on the least significant rank, write the result as a row
if (rank + 1 == Layout::kRank) {
return TensorView_WriteLeastSignificantRank(out, view, start_coord, rank, width);
}
// Otherwise, write a sequence of rows and newlines
for (int idx = 0; idx < view.extent(rank); ++idx) {
Coord<Layout::kRank> coord(start_coord);
coord[rank] = idx;
if (rank + 2 == Layout::kRank) {
      // Write least significant ranks as a matrix with rows delimited by ",\n"
if (idx) {
out << ",\n";
}
TensorView_WriteLeastSignificantRank(out, view, coord, rank + 1, width);
}
else {
// Higher ranks are separated by newlines
if (idx) {
out << ",\n\n";
}
TensorView_WriteRank(out, view, coord, rank + 1, width);
}
}
return out;
}
/// Helper to write the least significant rank of a TensorView
template <
typename Element,
typename Layout
>
inline std::ostream & TensorViewPlanarComplex_WriteLeastSignificantRank(
std::ostream& out,
TensorViewPlanarComplex<Element, Layout> const& view,
Coord<Layout::kRank> const &start_coord,
int rank,
std::streamsize width) {
for (int idx = 0; idx < view.extent(rank); ++idx) {
Coord<Layout::kRank> coord(start_coord);
coord[rank] = idx;
if (idx) {
out.width(0);
out << ", ";
}
if (idx || coord) {
out.width(width);
}
complex<Element> x = view.at(coord);
out << x;
}
return out;
}
/// Helper to write a rank of a TensorView
template <
typename Element,
typename Layout
>
inline std::ostream & TensorViewPlanarComplex_WriteRank(
std::ostream& out,
TensorViewPlanarComplex<Element, Layout> const& view,
Coord<Layout::kRank> const &start_coord,
int rank,
std::streamsize width) {
// If called on the least significant rank, write the result as a row
if (rank + 1 == Layout::kRank) {
return TensorViewPlanarComplex_WriteLeastSignificantRank(out, view, start_coord, rank, width);
}
// Otherwise, write a sequence of rows and newlines
for (int idx = 0; idx < view.extent(rank); ++idx) {
Coord<Layout::kRank> coord(start_coord);
coord[rank] = idx;
if (rank + 2 == Layout::kRank) {
      // Write least significant ranks as a matrix with rows delimited by ";\n"
if (idx) {
out << ";\n";
}
TensorViewPlanarComplex_WriteLeastSignificantRank(out, view, coord, rank + 1, width);
}
else {
// Higher ranks are separated by newlines
if (idx) {
out << "\n";
}
TensorViewPlanarComplex_WriteRank(out, view, coord, rank + 1, width);
}
}
return out;
}
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Prints human-readable representation of a TensorView to an ostream
template <
typename Element,
typename Layout
>
inline std::ostream& TensorViewWrite(
std::ostream& out,
TensorView<Element, Layout> const& view) {
// Prints a TensorView according to the following conventions:
  // - least significant rank is printed as rows separated by ",\n"
// - all greater ranks are delimited with newlines
//
// The result is effectively a whitespace-delimited series of 2D matrices.
return detail::TensorView_WriteRank(out, view, Coord<Layout::kRank>(), 0, out.width());
}
/// Prints human-readable representation of a TensorView to an ostream
template <
typename Element,
typename Layout
>
inline std::ostream& operator<<(
std::ostream& out,
TensorView<Element, Layout> const& view) {
// Prints a TensorView according to the following conventions:
  // - least significant rank is printed as rows separated by ",\n"
// - all greater ranks are delimited with newlines
//
// The result is effectively a whitespace-delimited series of 2D matrices.
return TensorViewWrite(out, view);
}
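// A minimal usage sketch (illustrative; the HostTensor below is an assumption):
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({4, 8});
//   // ... fill tensor.host_view() ...
//   std::cout << tensor.host_view() << "\n";   // formatted by TensorViewWrite above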
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Prints human-readable representation of a TensorView to an ostream
template <
typename Element,
typename Layout
>
inline std::ostream& TensorViewWrite(
std::ostream& out,
TensorViewPlanarComplex<Element, Layout> const& view) {
// Prints a TensorView according to the following conventions:
// - least significant rank is printed as rows separated by ";\n"
// - all greater ranks are delimited with newlines
//
// The result is effectively a whitespace-delimited series of 2D matrices.
return detail::TensorViewPlanarComplex_WriteRank(out, view, Coord<Layout::kRank>(), 0, out.width());
}
/// Prints human-readable representation of a TensorView to an ostream
template <
typename Element,
typename Layout
>
inline std::ostream& operator<<(
std::ostream& out,
TensorViewPlanarComplex<Element, Layout> const& view) {
// Prints a TensorView according to the following conventions:
// - least significant rank is printed as rows separated by ";\n"
// - all greater ranks are delimited with newlines
//
// The result is effectively a whitespace-delimited series of 2D matrices.
return TensorViewWrite(out, view);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
|
cutlass/tools/util/include/cutlass/util/tensor_view_io.h/0
|
{
"file_path": "cutlass/tools/util/include/cutlass/util/tensor_view_io.h",
"repo_id": "cutlass",
"token_count": 2639
}
| 71 |
# Copyright (c) 2019 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# A small utility function which generates a C-header from an input file
function(FILE_TO_C_STRING FILENAME VARIABLE_NAME OUTPUT_STRING ZERO_TERMINATED)
FILE(READ "${FILENAME}" HEX_INPUT HEX)
if (${ZERO_TERMINATED})
string(APPEND HEX_INPUT "00")
endif()
string(REGEX REPLACE "(....)" "\\1\n" HEX_OUTPUT ${HEX_INPUT})
string(REGEX REPLACE "([0-9a-f][0-9a-f])" "char(0x\\1)," HEX_OUTPUT ${HEX_OUTPUT})
set(HEX_OUTPUT "static char const ${VARIABLE_NAME}[] = {\n ${HEX_OUTPUT}\n};\n")
set(${OUTPUT_STRING} "${HEX_OUTPUT}" PARENT_SCOPE)
endfunction()
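# Illustrative invocation (the file names and the variable name below are assumptions);
# the script body that follows expects FILE_IN, FILE_OUT, VARIABLE_NAME, and
# ZERO_TERMINATED to be defined, for example:
#   cmake -DFILE_IN=kernel.cu -DFILE_OUT=kernel.h -DVARIABLE_NAME=kKernelSource \
#         -DZERO_TERMINATED=ON -P bin2hex.cmake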
# message("Create header file for ${FILE_IN}")
# message("Create header file for ${FILE_OUT}")
file_to_c_string(${FILE_IN} ${VARIABLE_NAME} OUTPUT_STRING ZERO_TERMINATED)
set(RESULT "#pragma once\n")
string(APPEND RESULT "namespace cutlass {\n")
string(APPEND RESULT "namespace nvrtc {\n")
string(APPEND RESULT "${OUTPUT_STRING}")
string(APPEND RESULT "} // namespace nvrtc\n")
string(APPEND RESULT "} // namespace cutlass\n")
file(WRITE "${FILE_OUT}" "${RESULT}")
|
cutlass/bin2hex.cmake/0
|
{
"file_path": "cutlass/bin2hex.cmake",
"repo_id": "cutlass",
"token_count": 866
}
| 0 |
var searchData=
[
['_5f_5fnv_5fstd_5fmax',['__NV_STD_MAX',['../platform_8h.html#abd31f291635329bc15292954f1f01d38',1,'platform.h']]],
['_5f_5fnv_5fstd_5fmin',['__NV_STD_MIN',['../platform_8h.html#a39e234a3e3b0018b58df720bcb143420',1,'platform.h']]],
['_5f_5fplatform_5fcat',['__platform_cat',['../platform_8h.html#aece7fe71be5aaf8d12dc9e2372f97de4',1,'platform.h']]],
['_5f_5fplatform_5fcat_5f',['__platform_cat_',['../platform_8h.html#acd148999a5caeba8f6fd52e7e288e659',1,'platform.h']]]
];
|
cutlass/docs/search/defines_0.js/0
|
{
"file_path": "cutlass/docs/search/defines_0.js",
"repo_id": "cutlass",
"token_count": 261
}
| 1 |
var searchData=
[
['layouttypeid',['LayoutTypeID',['../namespacecutlass_1_1library.html#aa863c416529c1fe76555be9760619a30',1,'cutlass::library']]]
];
|
cutlass/docs/search/enums_4.js/0
|
{
"file_path": "cutlass/docs/search/enums_4.js",
"repo_id": "cutlass",
"token_count": 63
}
| 2 |
var searchData=
[
['vector_2eh',['vector.h',['../vector_8h.html',1,'']]],
['volta_5ftensor_5fop_5fpolicy_2eh',['volta_tensor_op_policy.h',['../volta__tensor__op__policy_8h.html',1,'']]]
];
|
cutlass/docs/search/files_12.js/0
|
{
"file_path": "cutlass/docs/search/files_12.js",
"repo_id": "cutlass",
"token_count": 90
}
| 3 |
var searchData=
[
['debug_2eh',['debug.h',['../include_2cutlass_2util_2debug_8h.html',1,'']]],
['gemm_2eh',['gemm.h',['../include_2cutlass_2gemm_2device_2gemm_8h.html',1,'']]],
['gemm_2eh',['gemm.h',['../include_2cutlass_2gemm_2kernel_2gemm_8h.html',1,'']]],
['gemm_2eh',['gemm.h',['../include_2cutlass_2gemm_2gemm_8h.html',1,'']]],
['gemm_5fcomplex_2eh',['gemm_complex.h',['../include_2cutlass_2gemm_2device_2gemm__complex_8h.html',1,'']]],
['inner_5fproduct_2eh',['inner_product.h',['../inner__product_8h.html',1,'']]],
['integer_5fsubbyte_2eh',['integer_subbyte.h',['../integer__subbyte_8h.html',1,'']]],
['interleaved_5fepilogue_2eh',['interleaved_epilogue.h',['../interleaved__epilogue_8h.html',1,'']]]
];
|
cutlass/docs/search/files_8.js/0
|
{
"file_path": "cutlass/docs/search/files_8.js",
"repo_id": "cutlass",
"token_count": 339
}
| 4 |
var searchData=
[
['w',['w',['../structcutlass_1_1Tensor4DCoord.html#ae3136dc898c4ef079e73b51b1850ba7e',1,'cutlass::Tensor4DCoord::w() const '],['../structcutlass_1_1Tensor4DCoord.html#a3b391bf3ec3db6eec31eb23d5ff7fd21',1,'cutlass::Tensor4DCoord::w()']]],
['wait',['wait',['../classcutlass_1_1Semaphore.html#a176a4cbf65e47e9fcba9d93fc264b9c3',1,'cutlass::Semaphore']]]
];
|
cutlass/docs/search/functions_16.js/0
|
{
"file_path": "cutlass/docs/search/functions_16.js",
"repo_id": "cutlass",
"token_count": 188
}
| 5 |
var searchData=
[
['predicate_20iterator_20concept',['Predicate Iterator Concept',['../group__predicate__iterator__concept.html',1,'']]],
['predicate_20tile_20adapter_20concept',['Predicate Tile Adapter Concept',['../group__predicate__tile__adapter.html',1,'']]],
['predicate_20vector_20concept',['Predicate Vector Concept',['../group__predicate__vector__concept.html',1,'']]]
];
|
cutlass/docs/search/groups_0.js/0
|
{
"file_path": "cutlass/docs/search/groups_0.js",
"repo_id": "cutlass",
"token_count": 121
}
| 6 |
var searchData=
[
['uniform',['uniform',['../structcutlass_1_1Distribution.html#afc30b6976acb39e54f061af1bf2870db',1,'cutlass::Distribution']]],
['use_5fdp4a',['use_dp4a',['../classcutlass_1_1gemm_1_1warp_1_1MmaSimt.html#a39e22e3c7afea584e8425064fe72410b',1,'cutlass::gemm::warp::MmaSimt']]]
];
|
cutlass/docs/search/variables_12.js/0
|
{
"file_path": "cutlass/docs/search/variables_12.js",
"repo_id": "cutlass",
"token_count": 150
}
| 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS layout visualization tool
*/
#include <map>
#include <iostream>
#include <iomanip>
#include <memory>
#include <cutlass/cutlass.h>
#include "options.h"
#include "register_layout.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
std::map<std::string, std::unique_ptr<VisualizeLayoutBase> > layouts;
/////////////////////////////////////////////////////////////////////////////////////////////////
void print_usage(std::ostream &out) {
out << "03_visualize_layout <layout> [options]"
<< "\n\n"
<< " Layouts:\n";
for (auto const & layout : layouts) {
out << " " << layout.first << std::string(46 - layout.first.size(), ' ');
layout.second->print_help(out);
out << "\n";
}
out << "\n";
Options::print_usage(out);
out << "\nExamples:\n\n"
<< "$ 03_visualize_layout RowMajor --extent=16,16\n"
<< "$ 03_visualize_layout \"ColumnMajorInterleaved<4>\" --extent=32,8 "
"--output-shape=16 --vectorize=4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<4,64>\" "
"--extent=64,64 --vectorize=32 --output-shape=256,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<4,128>\" "
"--extent=128,32 --vectorize=32 --output-shape=256,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<4,256>\" "
"--extent=256,16 --vectorize=32 --output-shape=256,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<8,32>\" "
"--extent=32,64 --vectorize=16 --output-shape=128,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<8,64>\" "
"--extent=64,32 --vectorize=16 --output-shape=128,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<8,128>\" "
"--extent=128,16 --vectorize=16 --output-shape=128,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<16,32>\" "
"--extent=32,32 --vectorize=8 --output-shape=64,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<16,64>\" "
"--extent=64,16 --vectorize=8 --output-shape=64,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<32,16>\" "
"--extent=16,32 --vectorize=4 --output-shape=32,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<32,32>\" "
"--extent=32,16 --vectorize=4 --output-shape=32,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicandCongruous<32,32>\" "
"--extent=32,16 --vectorize=4 --output-shape=32,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicandCongruous<64, 16>\" "
"--extent=16,16 --vectorize=2 --output-shape=16,4\n"
<< "$ 03_visualize_layout \"VoltaTensorOpMultiplicandCrosswise<16,32>\" "
"--extent=32,64 --vectorize=4 --output-shape=64,4\n"
<< "$ 03_visualize_layout \"VoltaTensorOpMultiplicandCongruous<16>\" "
"--extent=64,32 --vectorize=8 --output-shape=64,4\n";
out << std::endl;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point
int main(int argc, char const *arg[]) {
RegisterLayouts(layouts);
  if (argc == 1 || (std::string(arg[1]) == "-h" || std::string(arg[1]) == "--help")) {
print_usage(std::cout);
return 0;
}
// parse command line, skipping layout name
cutlass::CommandLine cmd_line(argc - 1, arg + 1);
Options options(cmd_line);
if (options.help) {
print_usage(std::cout);
return 0;
}
if (!options.good) {
return -1;
}
std::string layout_name = arg[1];
auto layout_it = layouts.find(layout_name);
if (layout_it == layouts.end()) {
std::cerr << "Layout '" << layout_name << "' not supported." << std::endl;
return -1;
}
bool passed = layout_it->second->visualize(options);
if (!passed) {
return -1;
}
layout_it->second->print_csv(std::cout);
cudaFree(0); // Ensure CUDA is available.
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/examples/03_visualize_layout/visualize_layout.cpp/0
|
{
"file_path": "cutlass/examples/03_visualize_layout/visualize_layout.cpp",
"repo_id": "cutlass",
"token_count": 2052
}
| 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined Implicit GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/semaphore.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/epilogue/threadblock/output_iterator_parameter.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename B2bMma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad)
typename ConvProblemSize_ = Conv2dProblemSize ///! Convolutional operator on 2D or 3D problem
>
struct B2bImplicitGemmConvolution {
using B2bMma = B2bMma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp0 = typename B2bMma::OutputOp;
using EpilogueOutputOp1 = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static Operator const kConvolutionalOperator = ConvOperator;
using ElementA = typename B2bMma::IteratorA0::Element;
using LayoutA = typename B2bMma::IteratorA0::Layout;
using ElementB = typename B2bMma::IteratorB0::Element;
using LayoutB = typename B2bMma::IteratorB0::Layout;
using ElementC = typename EpilogueOutputOp1::ElementOutput;
/// Set output tensor C layout
using LayoutC = LayoutA;
using ElementAccumulator = typename EpilogueOutputOp0::ElementAccumulator;
using ElementCompute = typename EpilogueOutputOp0::ElementCompute;
/// Scale and Bias
using ElementScaleBias = typename B2bMma::IteratorAccumulatorScaleBias::Element;
using LayoutScaleBias = typename B2bMma::IteratorAccumulatorScaleBias::Layout;
using WarpMmaOperator0 = typename B2bMma::Policy0::Operator;
using WarpMmaOperator1 = typename B2bMma::Policy1::Operator;
using ArchMmaOperator = typename WarpMmaOperator0::ArchMmaOperator;
using MathOperator = typename ArchMmaOperator::Operator;
using OperatorClass = typename WarpMmaOperator0::OperatorClass;
using ArchTag = typename WarpMmaOperator0::ArchTag;
using ThreadblockShape0 = typename B2bMma::Shape0;
using ThreadblockShape1 = typename B2bMma::Shape1;
using WarpShape0 = typename WarpMmaOperator0::Shape;
using WarpShape1 = typename WarpMmaOperator1::Shape;
using InstructionShape = typename ArchMmaOperator::Shape;
static int const kStages = B2bMma::kStages;
static IteratorAlgorithm const kIteratorAlgorithm = B2bMma::IteratorA0::kIteratorAlgorithm;
/// Warp count (concept: GemmShape)
using WarpCount0 = typename B2bMma::WarpCount0;
static int const kThreadCount = 32 * WarpCount0::kCount;
using TensorRefA0 = typename B2bMma::IteratorA0::TensorRef;
using TensorRefB0 = typename B2bMma::IteratorB0::TensorRef;
using TensorRefScaleBias0 = typename B2bMma::IteratorAccumulatorScaleBias::TensorRef;
using TensorRefB1 = typename B2bMma::IteratorB1::TensorRef;
using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>;
/// Check iterator A and B convolution dimension are the same and
// set device::B2bImplicitGemmConvolution::kConvDim
static_assert(B2bMma::IteratorA0::kConvDim == B2bMma::IteratorB0::kConvDim,
"Convolution on different dimensions is not supported");
static int const kConvDim = B2bMma::IteratorA0::kConvDim;
/// Conv dimension and problem size structure (Conv2d or Conv3d)
using ConvProblemSize = ConvProblemSize_;
/// Wgrad C stride idx for implicit gemm algorithm
// Conv2d row-major matrix C (KxRSC)
// Conv3d row-major matrix C (KxTRSC)
static int const kWgradCStrideIdx =
cutlass::platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3;
/// This chooses the appropriate stride element of the C tensor.
static int const kTensorCStrideIdx =
(kConvolutionalOperator == conv::Operator::kWgrad ? kWgradCStrideIdx : 0);
//
//
//
using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter<
LayoutC,
typename Epilogue::OutputTileIterator::Layout,
TensorRefC,
ConvOperator,
ConvProblemSize
>;
/// Argument structure
struct Arguments {
//
// Data members
//
ConvProblemSize problem_size_0;
ConvProblemSize problem_size_1;
TensorRefA0 ref_A0;
TensorRefB0 ref_B0;
TensorRefC ref_C0;
TensorRefScaleBias0 ref_Scale0;
TensorRefScaleBias0 ref_Bias0;
TensorRefB1 ref_B1;
TensorRefC ref_C1;
TensorRefC ref_D1;
typename EpilogueOutputOp0::Params output_op_0;
typename EpilogueOutputOp1::Params output_op_1;
SplitKMode split_k_mode;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size_0,
ConvProblemSize const & problem_size_1
):
problem_size_0(problem_size_0),
problem_size_1(problem_size_1) { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size_0,
ConvProblemSize const & problem_size_1,
TensorRefA0 const & ref_A0,
TensorRefB0 const & ref_B0,
TensorRefC const & ref_C0,
TensorRefScaleBias0 const & ref_Scale0,
TensorRefScaleBias0 const & ref_Bias0,
TensorRefB1 const & ref_B1,
TensorRefC const & ref_C1,
TensorRefC const & ref_D1,
typename EpilogueOutputOp0::Params const & output_op_0,
typename EpilogueOutputOp1::Params const & output_op_1,
SplitKMode const & split_k_mode = SplitKMode::kSerial
):
problem_size_0(problem_size_0),
problem_size_1(problem_size_1),
ref_A0(ref_A0),
ref_B0(ref_B0),
ref_C0(ref_C0),
ref_Scale0(ref_Scale0),
ref_Bias0(ref_Bias0),
ref_B1(ref_B1),
ref_C1(ref_C1),
ref_D1(ref_D1),
output_op_0(output_op_0),
output_op_1(output_op_1),
split_k_mode(split_k_mode)
{
}
};
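  // A minimal host-side construction sketch for Arguments (illustrative; the
  // device tensor references, problem sizes, and epilogue scalars are assumptions
  // supplied by the caller):
  //
  //   Arguments args(
  //     problem_size_0, problem_size_1,
  //     tensor_A0.device_ref(), tensor_B0.device_ref(), tensor_C0.device_ref(),
  //     tensor_Scale0.device_ref(), tensor_Bias0.device_ref(),
  //     tensor_B1.device_ref(), tensor_C1.device_ref(), tensor_D1.device_ref(),
  //     {alpha0, beta0}, {alpha1, beta1});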
/// Parameters structure
struct Params {
ConvProblemSize problem_size_0;
ConvProblemSize problem_size_1;
cutlass::gemm::GemmCoord grid_tiled_shape;
gemm::GemmCoord implicit_gemm_problem_size_0;
gemm::GemmCoord implicit_gemm_problem_size_1;
int swizzle_log_tile;
int gemm_k_iterations_0;
int gemm_k_iterations_1;
typename B2bMma::IteratorA0::Params iterator_A0;
typename B2bMma::IteratorA0::Element const *ptr_A0;
typename B2bMma::IteratorB0::Params iterator_B0;
typename B2bMma::IteratorB0::Element const *ptr_B0;
typename Epilogue::OutputTileIterator::Params iterator_C0;
typename Epilogue::OutputTileIterator::Element *ptr_C0;
typename B2bMma::IteratorAccumulatorScaleBias::Element *ptr_Scale0;
typename B2bMma::IteratorAccumulatorScaleBias::Element *ptr_Bias0;
typename B2bMma::IteratorB1::Params iterator_B1;
typename B2bMma::IteratorB1::Element const *ptr_B1;
typename Epilogue::OutputTileIterator::Params iterator_C1;
typename Epilogue::OutputTileIterator::Element *ptr_C1;
typename Epilogue::OutputTileIterator::Params iterator_D1;
typename Epilogue::OutputTileIterator::Element *ptr_D1;
typename EpilogueOutputOp0::Params output_op_0;
typename EpilogueOutputOp1::Params output_op_1;
int *semaphore;
SplitKMode split_k_mode;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0), gemm_k_iterations_0(0), gemm_k_iterations_1(0) { }
///
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
int *semaphore = nullptr
):
problem_size_0(args.problem_size_0),
problem_size_1(args.problem_size_1),
implicit_gemm_problem_size_0(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size_0)),
implicit_gemm_problem_size_1(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size_1)),
iterator_A0(B2bMma::IteratorA0::getParams(args.problem_size_0, args.ref_A0.layout())),
ptr_A0(args.ref_A0.data()),
iterator_B0(args.problem_size_0, args.ref_B0.layout()),
ptr_B0(args.ref_B0.data()),
iterator_C0(ConvOutputIteratorParameter::layout(args.ref_C0)),
ptr_C0(args.ref_C0.data()),
ptr_Scale0(args.ref_Scale0.data()),
ptr_Bias0(args.ref_Bias0.data()),
iterator_B1(args.problem_size_1, args.ref_B1.layout()),
ptr_B1(args.ref_B1.data()),
iterator_C1(ConvOutputIteratorParameter::layout(args.ref_C1)),
ptr_C1(args.ref_C1.data()),
iterator_D1(ConvOutputIteratorParameter::layout(args.ref_D1)),
ptr_D1(args.ref_D1.data()),
output_op_0(args.output_op_0),
output_op_1(args.output_op_1),
semaphore(semaphore),
split_k_mode(args.split_k_mode)
{
gemm_k_iterations_0 = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape0::kK, args.problem_size_0);
gemm_k_iterations_1 = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape1::kK, args.problem_size_1);
ThreadblockSwizzle threadblock_swizzle;
grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
implicit_gemm_problem_size_0,
{ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK},
args.problem_size_0.split_k_slices);
swizzle_log_tile = ThreadblockSwizzle().get_log_tile(grid_tiled_shape);
}
};
/// Shared memory storage structure
union SharedStorage {
typename B2bMma::B2bMmaSharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
B2bImplicitGemmConvolution() { }
/// Executes one ImplicitGEMM
CUTLASS_DEVICE
  void operator()(Params const &params, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) {
return;
}
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename B2bMma::IteratorA0 iterator_A0(
params.iterator_A0,
params.problem_size_0,
params.ptr_A0,
thread_idx,
MatrixCoord(
threadblock_tile_idx.m() * B2bMma::Shape0::kM,
threadblock_tile_idx.k() * B2bMma::Shape0::kK
)
);
typename B2bMma::IteratorB0 iterator_B0(
params.iterator_B0,
params.problem_size_0,
params.ptr_B0,
thread_idx,
MatrixCoord(
threadblock_tile_idx.k() * B2bMma::Shape0::kK,
threadblock_tile_idx.n() * B2bMma::Shape0::kN
)
);
typename B2bMma::IteratorB1 iterator_B1(
params.iterator_B1,
params.problem_size_1,
params.ptr_B1,
thread_idx,
MatrixCoord(
threadblock_tile_idx.k() * B2bMma::Shape1::kK,
threadblock_tile_idx.n() * B2bMma::Shape1::kN
)
);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
// Construct iterators to accumulator scale/bias vector
typename B2bMma::IteratorAccumulatorScaleBias iterator_Scale0(
params.ptr_Scale0,
{1, params.problem_size_0.K},
thread_idx,
warp_idx,
MatrixCoord(
0, threadblock_tile_idx.n() * B2bMma::Shape0::kN
)
);
typename B2bMma::IteratorAccumulatorScaleBias iterator_Bias0(
params.ptr_Bias0,
{1, params.problem_size_0.K},
thread_idx,
warp_idx,
MatrixCoord(
0, threadblock_tile_idx.n() * B2bMma::Shape0::kN
)
);
//
// Main loop
//
EpilogueOutputOp0 output_op_0(params.output_op_0);
// Construct thread-scoped matrix multiply
B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename B2bMma::FragmentC0 src_accum;
typename B2bMma::FragmentC1 accumulators;
src_accum.clear();
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
b2bMma(params.gemm_k_iterations_0, accumulators, iterator_A0, iterator_B0,
iterator_Scale0, iterator_Bias0, iterator_B1, src_accum, output_op_0);
//
// Epilogue
//
EpilogueOutputOp1 output_op_1(params.output_op_1);
// Construct the semaphore.
int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m();
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// Compute logical position within grid
threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// If performing a reduction via split-K, fetch the initial synchronization
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op_1.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k());
}
MatrixCoord threadblock_offset(
threadblock_tile_idx.m() * B2bMma::Shape1::kM,
threadblock_tile_idx.n() * B2bMma::Shape1::kN
);
// Tile iterator writing to destination tensor
typename Epilogue::OutputTileIterator iterator_D1(
params.iterator_D1,
params.ptr_D1,
ConvOutputIteratorParameter::extent(params.problem_size_1),
thread_idx,
threadblock_offset
);
// Tile iterator reading from source accumulator tensor
typename Epilogue::OutputTileIterator iterator_C1(
params.iterator_C1,
params.ptr_C1,
ConvOutputIteratorParameter::extent(params.problem_size_1),
thread_idx,
threadblock_offset
);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_idx.k()) {
iterator_C1 = iterator_D1;
}
semaphore.wait(threadblock_tile_idx.k());
__threadfence();
}
// Each split-k-slice writes to a unique tensor location
else if (params.split_k_mode == SplitKMode::kParallel) {
iterator_D1.add_pointer_offset(threadblock_tile_idx.k() *
cutlass::conv::implicit_gemm_tensor_c_size(ConvOperator, params.problem_size_1));
}
// Run efficient epilogue
epilogue(output_op_1, iterator_D1, accumulators, iterator_C1);
//
// Release the semaphore
//
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_idx.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/examples/13_two_tensor_op_fusion/kernel/b2b_implicit_gemm_convolution.h/0
|
{
"file_path": "cutlass/examples/13_two_tensor_op_fusion/kernel/b2b_implicit_gemm_convolution.h",
"repo_id": "cutlass",
"token_count": 7161
}
| 9 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "threadblock/b2b_mma_base.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape0_,
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy1_,
/// Shared Memory Accumulator Iterator
typename SmemAccumulatorIterator0_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class B2bMmaBaseSmemAccumulator :
public B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages> {
public:
///< Base class
using Base = B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape0 = Shape0_;
using Shape1 = Shape1_;
///< Policy describing tuning details
using Policy0 = Policy0_;
using Policy1 = Policy1_;
using SmemAccumulatorIterator0 = SmemAccumulatorIterator0_;
//
// Nested structs
//
/// Shared storage object needed by accumulator
template<
typename Shape_,
typename Element_,
typename Layout_,
typename Padding_
>
class AccumulatorSharedStorage {
public:
//
// Type definitions
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using Padding = Padding_;
/// Tensor reference to the accumulator
using TensorRefAccum = TensorRef<Element, Layout>;
/// Shape of the accumulator matrix in shared memory
using ShapeAccum = MatrixShape<Shape::kM + Padding::kRow,
Shape::kN + Padding::kColumn>;
public:
//
// Data members
//
/// Buffer for accumulator
AlignedBuffer<Element, ShapeAccum::kCount> accum;
public:
//
// Methods
//
/// Returns a layout object for the Accum matrix
CUTLASS_DEVICE
static Layout LayoutAccum() {
return Layout::packed({ShapeAccum::kRow, ShapeAccum::kColumn});
}
/// Returns a TensorRef to the Accumulator
CUTLASS_HOST_DEVICE
TensorRefAccum accum_ref() {
return TensorRefAccum{accum.data(), LayoutAccum()};
}
};
using AccumulatorSharedStorage0 = AccumulatorSharedStorage<
Shape0, typename SmemAccumulatorIterator0::Element,
typename SmemAccumulatorIterator0::TensorLayout,
typename SmemAccumulatorIterator0::Padding>;
struct B2bMmaSharedStorage {
typename Base::B2bMmaSharedStorage b2b_mma_shared_storage;
AccumulatorSharedStorage0 accumulator_shared_storage0;
};
public:
/// Construct from tensor references
CUTLASS_DEVICE
B2bMmaBaseSmemAccumulator(
///< Shared storage needed for internal use by threadblock-scoped GEMM
B2bMmaSharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage.b2b_mma_shared_storage, thread_idx, warp_idx, lane_idx) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h/0
|
{
"file_path": "cutlass/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h",
"repo_id": "cutlass",
"token_count": 1950
}
| 10 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
  The convolution version of 12_gemm_bias_relu. Similarly, we put the bias vector in Operand C and
  the rest is the same as a normal convolution.
*/
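// Conceptually (a sketch of the math, not the kernel code itself), with the bias broadcast
// along the N, H, W dimensions the kernel computes
//   D[n, p, q, k] = max(0, alpha * Conv(A, B)[n, p, q, k] + bias[k])
// which is exactly what the host-side reference check at the end of run() recomputes.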
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput, // Data type of output matrix.
    128 / cutlass::sizeof_bits<ElementOutput>::value,     // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha in linear combination
    cutlass::epilogue::thread::ScaleType::NoBetaScaling>;   // D = ReLU(alpha * accumulator + per-channel bias)
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
int run() {
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
{1, 7, 7, 512}, // activation
{512, 3, 3, 512}, // filter
{1, 1, 1, 1}, // padding
{1, 1}, // striding
{1, 1}, // dilation
cutlass::conv::Mode::kCrossCorrelation, // mode (convolution or cross-correlation)
1 // split-k slices
);
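  // For this problem the output extent is 1 x 7 x 7 x 512: P = Q = (7 + 1 + 1 - 3) / 1 + 1 = 7.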
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.activation_extent());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.filter_extent());
// Create tensor C with dimensions 1x1x1xk which is the bias vector
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias({1, 1, 1, problem_size.K});
// Create tensor D used to store output from CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.output_extent());
  // Create tensor ref_d used to store output from the reference kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.output_extent());
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_bias.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c_bias.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(), // <- reference to tensor A on device
tensor_b.device_ref(), // <- reference to tensor B on device
    // tensor C is treated as the bias vector. Setting its N, H, W strides to zero
    // broadcasts the same per-channel bias value to every output position.
{tensor_c_bias.device_data(), LayoutOutput::Stride(0)},
tensor_d.device_ref(), // <- reference to tensor D on device
{alpha} };
// Instantiate CUTLASS kernel depending on templates
ImplicitGemm implicit_gemm_op;
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check the problem size is supported or not
cutlass::Status status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = implicit_gemm_op();
CUTLASS_CHECK(status);
//
// Create instantiation for device reference conv kernel
//
// Launch device reference to compute strictly the product A * B
cutlass::reference::device::Conv2d<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>>
(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c_bias.device_ref(),
tensor_ref_d.device_ref(),
alpha, ElementComputeEpilogue(0)
);
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Compute bias + relu in host code
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
tensor_ref_d.at({n, p, q, k}) =
std::max(ElementOutput(0),
ElementOutput(tensor_ref_d.at({n, p, q, k}) +
tensor_c_bias.at({0, 0, 0, k})));
}
}
}
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(),
tensor_ref_d.host_view())
? "Passed"
: "Failed")
<< std::endl;
CUTLASS_CHECK(status);
return 0;
}
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
return run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/examples/17_fprop_per_channel_bias/fprop_per_channel_bias.cu/0
|
{
"file_path": "cutlass/examples/17_fprop_per_channel_bias/fprop_per_channel_bias.cu",
"repo_id": "cutlass",
"token_count": 4569
}
| 11 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to fuse per channel scale+bias+relu of the activations
into the 3D fprop mainloop.
Compared with original 3D fprop kernel, this example has two more vectors, one for
the scale and one for the bias. The length of the vectors is the same as the
activation channel number. This kernel loads the vectors when the associated
activation channels are loaded in the mainloop. Between reading the
activations and scale/bias data from the shared memory and calling tensor core
instructions, scale+bias+relu is computed in the register file.
This example is customized for Ampere 16816 fp16 tensor core instruction.
  Changing to different data types or a different tensor core instruction requires
  source code changes. See
include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h for more
technical details.
  This example is based on 25_ampere_fprop_mainloop_fusion and accepts the same
  command line.
*/
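// As a scalar sketch of the fused transform (not the actual register-file code path), every
// activation element with channel index c becomes
//   a_fused[n, d, h, w, c] = max(0, a[n, d, h, w, c] * scale[c] + bias[c])
// before it is fed to the tensor core instructions; the host reference check below applies
// the same transform explicitly.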
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv3d_fprop_fusion.h"
#include "cutlass/conv/device/implicit_gemm_convolution_fusion.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementInputScaleBias = cutlass::half_t;    // Data type of elements in input scale and bias vectors
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNDHWC;
using LayoutInputB = cutlass::layout::TensorNDHWC;
using LayoutInputScaleBias = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::TensorNDHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
    128 / cutlass::sizeof_bits<ElementOutput>::value,     // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv3dFpropFusionKernel = typename cutlass::conv::kernel::DefaultConv3dFpropFusion<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementInputScaleBias, LayoutInputScaleBias,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemmFusion = cutlass::conv::device::ImplicitGemmConvolutionFusion<Conv3dFpropFusionKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor5DCoord input_size;
cutlass::Tensor5DCoord filter_size;
cutlass::Coord<3> padding;
cutlass::Coord<3> conv_stride;
cutlass::Coord<3> dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32, 32),
filter_size(32, 3, 3, 3, 32),
padding(cutlass::make_Coord(1, 1, 1)),
conv_stride(cutlass::make_Coord(1, 1, 1)),
dilation(cutlass::make_Coord(1, 1, 1)),
reference_check(true),
measure_performance(false),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding[0] != filter_size.d() / 2) ||
(padding[1] != filter_size.h() / 2) ||
(padding[2] != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor5DCoord input_size,
cutlass::Tensor5DCoord filter_size,
cutlass::Coord<3> stride) {
this->input_size = input_size;
this->filter_size = filter_size;
conv_stride = stride;
padding[0] = filter_size.d() / 2;
padding[1] = filter_size.h() / 2;
padding[2] = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("d", input_size.d());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("t", filter_size.d());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.d() == 3 && filter_size.h() == 3 && filter_size.w() == 3) {
padding = cutlass::make_Coord(1, 1, 1);
}
else {
filter_size.d() = 1;
filter_size.h() = 1;
filter_size.w() = 1;
padding = cutlass::make_Coord(0, 0, 0);
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "25_ampere_3d_fprop_mainloop_fusion example\n\n"
<< " This example fuses scale+bias+relu of the activations into Ampere's\n"
<< " Tensor Core operators on F16 data types to compute\n"
<< " forward convolution on tensors of layout NDHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n <int> Input tensor extent N\n"
<< " --d <int> Input tensor extent D\n"
<< " --h <int> Input tensor extent H\n"
<< " --w <int> Input tensor extent W\n"
<< " --c <int> Input tensor extent C\n"
<< " --k <int> Filter extent K\n"
<< " --t <int> Filter extent T\n"
<< " --r <int> Filter extent R\n"
<< " --s <int> Filter extent S\n\n"
<< " --alpha <float> Epilogue scalar alpha\n"
<< " --beta <float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations <int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag <string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./25_ampere_3d_fprop_mainloop_fusion --n=32 --d=96 --h=96 --w=96 --c=64 --k=64 --t=1 --r=1 --s=1\n\n"
<< "$ ./25_ampere_3d_fprop_mainloop_fusion --n=1 --d=224 --h=224 --w=224 --c=32 --k=32 --t=3 --r=3 --s=3 --ref-check\n\n"
<< "$ ./25_ampere_3d_fprop_mainloop_fusion --n=19 --d=94 --h=96 --w=96 --c=128 --k=128 --t=1 --r=1 --s=1\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor5DCoord output_size() const {
return cutlass::Tensor5DCoord(
input_size.n(),
(input_size.d() + padding[0] + padding[0] - filter_size.d()) / conv_stride[0] + 1,
(input_size.h() + padding[1] + padding[1] - filter_size.h()) / conv_stride[1] + 1,
(input_size.w() + padding[2] + padding[2] - filter_size.w()) / conv_stride[2] + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
    // Number of multiply-adds = (N * Z * P * Q * K) * (T * R * S * C)
int64_t fmas = output_size().product() * int64_t(filter_size.d() * filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,D,H,W,C,K,T,R,S,Stride_D,Stride_H,Stride_W,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.d() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.d() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.conv_stride[0] << ","
<< options.conv_stride[1] << ","
<< options.conv_stride[2] << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_transformed_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_a_scale({1, options.input_size.c()});
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_a_bias({1, options.input_size.c()});
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill scale vector for tensor A on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_scale.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill bias vector for tensor A on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_bias.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_a_scale.sync_device();
tensor_a_bias.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv3dProblemSize with user defined output size
cutlass::conv::Conv3dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
typename ImplicitGemmFusion::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_a_scale.device_ref(),
tensor_a_bias.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemmFusion implicit_gemm_fusion_op;
size_t workspace_size = implicit_gemm_fusion_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_fusion_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_fusion_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on device...\n";
// Compute scale + bias + relu in host code
for (int n = 0; n < options.input_size.n(); ++n) {
for (int d = 0; d < options.input_size.d(); ++d) {
for (int h = 0; h < options.input_size.h(); ++h) {
for (int w = 0; w < options.input_size.w(); ++w) {
for (int c = 0; c < options.input_size.c(); ++c) {
tensor_transformed_a.at({n, d, h, w, c}) = std::max(
ElementOutput(0), ElementOutput(tensor_a.at({n, d, h, w, c}) *
tensor_a_scale.at({0, c}) +
tensor_a_bias.at({0, c})));
}
}
}
}
}
tensor_transformed_a.sync_device();
// Compute with reference implementation
cutlass::reference::device::Conv3dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_transformed_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_ref_d.device_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_d.sync_host();
tensor_ref_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "25_ampere_3d_fprop_mainloop_fusion"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv3dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "This test must run on SM80 or above.\n";
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {34, 18};
struct Benchmark {
int d, h, w, c, k, t, r, s, stride_d, stride_h, stride_w;
} layers[] = {
{56, 56, 56, 64, 256, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 64, 64, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 64, 64, 3, 3, 3, 1, 1, 1},
{56, 56, 56, 256, 64, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 256, 512, 1, 1, 1, 2, 2, 2},
{56, 56, 56, 256, 128, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 128, 128, 3, 3, 3, 2, 2, 2},
{28, 28, 28, 128, 512, 1, 1, 1, 1, 1, 1},
{28, 28, 28, 512, 128, 1, 1, 1, 1, 1, 1},
{28, 28, 28, 128, 128, 3, 3, 3, 1, 1, 1},
{28, 28, 28, 512, 1024, 1, 1, 1, 2, 2, 2},
{28, 28, 28, 512, 256, 1, 1, 1, 1, 1, 1},
{28, 28, 28, 256, 256, 3, 3, 3, 2, 2, 2},
{14, 14, 14, 256, 1024, 1, 1, 1, 1, 1, 1},
{14, 14, 14, 1024, 256, 1, 1, 1, 1, 1, 1},
{14, 14, 14, 256, 256, 3, 3, 3, 1, 1, 1},
{14, 14, 14, 1024, 2048, 1, 1, 1, 2, 2, 2},
{14, 14, 14, 1024, 512, 1, 1, 1, 1, 1, 1},
{14, 14, 14, 512, 512, 3, 3, 3, 2, 2, 2},
{ 7, 7, 7, 512, 2048, 1, 1, 1, 1, 1, 1},
{ 7, 7, 7, 2048, 512, 1, 1, 1, 1, 1, 1},
{ 7, 7, 7, 512, 512, 3, 3, 3, 1, 1, 1},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.d, layer.h, layer.w, layer.c},
{layer.k, layer.t, layer.r, layer.s, layer.c},
cutlass::make_Coord(layer.stride_d, layer.stride_h, layer.stride_w));
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/examples/25_ampere_fprop_mainloop_fusion/ampere_3d_fprop_mainloop_fusion.cu/0
|
{
"file_path": "cutlass/examples/25_ampere_fprop_mainloop_fusion/ampere_3d_fprop_mainloop_fusion.cu",
"repo_id": "cutlass",
"token_count": 10476
}
| 12 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/arch/mma.h"
////////////////////////////////////////////////////////////////////////////////
// Some helper functions
////////////////////////////////////////////////////////////////////////////////
#define DISPATCH_TYPES(tensor, func) \
{ \
if (query.scalar_type() == at::ScalarType::Float) { \
using scalar_t = float; \
func(); \
} else if (query.scalar_type() == at::ScalarType::Half) { \
using scalar_t = cutlass::half_t; \
func(); \
} else if (query.scalar_type() == at::ScalarType::BFloat16) { \
using scalar_t = cutlass::bfloat16_t; \
func(); \
} else { \
XFORMERS_CHECK(false, "Only fp32, half & bf16 supported at the moment"); \
} \
}
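// Usage sketch (hypothetical call site; note the macro dispatches on a tensor named `query`
// that must be in scope, regardless of the `tensor` argument passed):
//   DISPATCH_TYPES(query, ([&] { launch_kernel<scalar_t>(params); }));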
#define DISPATCH_BOOL(BOOL_V, BOOL_NAME, F) \
{ \
if (BOOL_V) { \
constexpr bool BOOL_NAME = true; \
F(); \
} else { \
constexpr bool BOOL_NAME = false; \
F(); \
} \
}
#define DISPATCH_ARCHTAG(CC, func) \
{ \
if (CC >= 80) { \
using ArchTag = cutlass::arch::Sm80; \
func(); \
} else if (CC >= 75) { \
using ArchTag = cutlass::arch::Sm75; \
func(); \
} else if (CC >= 70) { \
using ArchTag = cutlass::arch::Sm70; \
func(); \
} else if (CC >= 50) { \
using ArchTag = cutlass::arch::Sm50; \
func(); \
} else { \
XFORMERS_CHECK( \
false, \
"Your device is too old. We require compute capability >= 50"); \
} \
}
#define CHECK_NOSPARSE_CONTIGUOUS_CUDA(TENSOR) \
XFORMERS_CHECK(TENSOR.is_cuda(), #TENSOR " must be a CUDA tensor"); \
XFORMERS_CHECK(!TENSOR.is_sparse(), #TENSOR " must be a dense tensor"); \
XFORMERS_CHECK(TENSOR.is_contiguous());
#define CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(TENSOR) \
XFORMERS_CHECK(TENSOR.is_cuda(), #TENSOR " must be a CUDA tensor"); \
XFORMERS_CHECK(!TENSOR.is_sparse(), #TENSOR " must be a dense tensor"); \
XFORMERS_CHECK( \
TENSOR.stride(-1) == 1, #TENSOR ": last dimension must be contiguous");
#ifdef TORCH_CHECK
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
XFORMERS_CHECK( \
uint64_t(PTR) % ALIGNMENT == 0, #PTR " is not correctly aligned")
#define XFORMERS_CHECK TORCH_CHECK
#elif defined(__CUDACC_RTC__)
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
if (!(uint64_t(PTR) % ALIGNMENT == 0)) { \
return false; \
}
#define XFORMERS_CHECK(COND, ERR) \
if (!(COND)) { \
return false; \
}
#else
#include <iostream>
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
if (!(uint64_t(PTR) % ALIGNMENT == 0)) { \
std::cerr << #PTR " is not correctly aligned\n"; \
return false; \
}
#define XFORMERS_CHECK(COND, ERR) \
if (!(COND)) { \
std::cerr << "'" #COND "' failed: " << ERR << "\n"; \
return false; \
}
#endif
#define ASSIGN_CHECK_OVERFLOW(A, B) \
{ \
A = B; \
XFORMERS_CHECK( \
B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \
}
namespace gemm_kernel_utils {
template <typename integer>
constexpr CUTLASS_HOST_DEVICE integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
template <typename integer>
constexpr CUTLASS_HOST_DEVICE integer align_up(integer n, integer m) {
return ((n + m - 1) / m) * m;
}
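// For example: ceil_div(10, 4) == 3 and align_up(10, 4) == 12.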
////////////////////////////////////////////////////////////////////////////////
// Determine the type of GEMM we do (TensorCores or not, Shapes ...)
// TODO: Maybe we could rely on Cutlass's DefaultGemm templates
////////////////////////////////////////////////////////////////////////////////
// Fallback to Simt (FMA on cuda cores) if not in a special case below
template <typename ArchTag, typename scalar_t_, typename Enable = void>
struct DefaultGemmType {
static constexpr int ThreadK = 8;
static constexpr int WarpK = 8;
static constexpr int kMinimumAlignment = 1;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using OpClass = cutlass::arch::OpClassSimt;
using Operator = cutlass::arch::OpMultiplyAdd;
};
// Specialization for tensorcores with f32
template <typename ArchTag>
struct DefaultGemmType<
ArchTag,
float,
typename cutlass::platform::enable_if<
ArchTag::kMinComputeCapability >= 80>::type> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 4;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Operator = cutlass::arch::OpMultiplyAddFastF32;
};
// Specialization for tensorcores with f16/bf16 - Sm75+
template <typename ArchTag, typename scalar_t>
struct DefaultGemmType<
ArchTag,
scalar_t,
typename cutlass::platform::enable_if<
ArchTag::kMinComputeCapability >= 75 &&
cutlass::sizeof_bits<scalar_t>::value == 16>::type> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 4;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Operator = cutlass::arch::OpMultiplyAdd;
};
// Specialization for tensorcores with f16 - Volta
template <>
struct DefaultGemmType<cutlass::arch::Sm70, cutlass::half_t, void> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 2;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Operator = cutlass::arch::OpMultiplyAdd;
};
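// Usage sketch: DefaultGemmType<cutlass::arch::Sm80, cutlass::half_t> selects OpClassTensorOp
// with InstructionShape GemmShape<16, 8, 8> via the Sm75+ 16-bit specialization above, while
// unmatched combinations fall back to the OpClassSimt primary template.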
// Enables to do
// `auto x = kCondition ? fa(arg) : fb(arg)`
// when `fa` and `fb` have different types
template <bool kVal, typename TA, typename TB>
struct call_conditional;
template <typename TA, typename TB>
struct call_conditional<true, TA, TB> {
template <typename Arg>
static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg)
-> decltype(ta(arg)) {
return ta(arg);
}
};
template <typename TA, typename TB>
struct call_conditional<false, TA, TB> {
template <typename Arg>
static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg)
-> decltype(tb(arg)) {
return tb(arg);
}
};
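// Usage sketch (hypothetical lambdas fa and fb whose return types differ):
//   auto x = call_conditional<kCondition, decltype(fa), decltype(fb)>::apply(fa, fb, arg);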
////////////////////////////////////////////////////////////////////////////////
// Mark a variable as warp-uniform - enables some compiler optimizations
// The cheapest way to do it is just to broadcast it from lane 0
////////////////////////////////////////////////////////////////////////////////
template <typename T>
CUTLASS_DEVICE T warp_uniform(T value) {
struct {
union {
T value;
uint32_t asInt;
};
} p;
p.value = value;
p.asInt = __shfl_sync(0xffffffff, (unsigned)p.asInt, 0);
return p.value;
}
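// Pointer overload: broadcasts the two 32-bit halves of the address separately
// (assumes 64-bit pointers, matching the uint32_t asInt[2] union below).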
template <typename T>
CUTLASS_DEVICE T* warp_uniform(T* ptr) {
struct {
union {
T* ptr;
uint32_t asInt[2];
};
} p;
p.ptr = ptr;
p.asInt[0] = warp_uniform(p.asInt[0]);
p.asInt[1] = warp_uniform(p.asInt[1]);
return p.ptr;
}
} // namespace gemm_kernel_utils
|
cutlass/examples/41_fused_multi_head_attention/gemm_kernel_utils.h/0
|
{
"file_path": "cutlass/examples/41_fused_multi_head_attention/gemm_kernel_utils.h",
"repo_id": "cutlass",
"token_count": 5441
}
| 13 |
This example provides utilities for generating back-to-back (B2B) GEMMs using CUTLASS.
## Quick start
A configuration file containing the GEMMs to be fused together is located in [config.json](config.json). Edit
this to change the configuration that you would like to run.
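For orientation, the sketch below shows roughly what one fused-GEMM entry looks like, expressed as a
Python dict. It is illustrative only: the field names (`mnk`, `A_tp`, `A_format`, ...) mirror what the
generator scripts in `ir_gen` read, and `Row`/`Col` are the layout values they test for, but the type
strings are assumptions; the shipped [config.json](config.json) is the authoritative schema.
```python
# Hypothetical sketch of a single entry consumed by the generator (not the exact schema).
gemm_entry = {
    "mnk": [1024, 64, 576],              # problem size; M and the first GEMM's K are supplied
                                         # at runtime as the M and K0 command-line arguments
    "A_tp": "fp16", "A_format": "Row",   # operand A element type and layout
    "B_tp": "fp16", "B_format": "Col",   # operand B element type and layout
    "C_tp": "fp16", "C_format": "Row",   # operand C/D element type and layout
    "Acc_tp": "fp16",                    # accumulator / epilogue compute type
}
```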
```shell
cd ir_gen
# Set up basic variables
out_dir=directory_to_emit_files
cutlass_dir=$(pwd)/../../..
config_file=$(pwd)/../config.json
# Generate code for GEMMs described in `config_file`
./generate.sh $config_file $out_dir $cutlass_dir
# Build the generated code
cd $out_dir
mkdir build && cd build
cmake .. -DGPU_ARCHS="75;80"
make -j
# Run the generated code with M=1024 K0=32 and Batch=1
./sample 1024 32 1
```
## Current restrictions
This experimental example has the following restrictions:
1. N tile should not exceed 256, or register spilling will occur.
2. Only FP16 is currently supported.
3. Matrix A must be row major, matrix B must be column major, matrices C and D must be row major.
## Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
|
cutlass/examples/44_multi_gemm_ir_and_codegen/README.md/0
|
{
"file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/README.md",
"repo_id": "cutlass",
"token_count": 736
}
| 14 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
class gen_turing_impl:
def __init__(self,fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.class_name = gen_class_name
self.gen_class_name = gen_class_name + "_turing_impl"
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
self.gen_turing_unfused = gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
def gen_using(self):
code_using = "using b2b_gemm = typename cutlass::gemm::device::" + self.class_name + "<cutlass::half_t>;"
return code_using + "\n"
def gen_initialize(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " alpha", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(1);\n"
beta = "(1)"
if helper.get_epilogue_add_bias_or_not(self.fuse_gemm_info[i]) is False:
beta = "(0)"
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " beta", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + beta + ";\n"
k_str = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
k_str = "K0"
code_this += helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(M, " + str(self.fuse_gemm_info[i]['mnk'][1]) + ", " + k_str + ");\n"
code += code_this
code += "typename b2b_gemm::Arguments arguments{\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("problem_size_", i) + ",\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("A", 0) + "), " + helper.var_idx("problem_size_", 0) + ".k()},\n"
for i in range(self.b2b_num):
ldmB = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmB = "K0"
            if self.fuse_gemm_info[i]['B_format'] == 'Row':
ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
ldmC = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("B", i) + "), " + ldmB + "},\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("C", i) + "), " + ldmC + "},\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("D", self.b2b_num -1) + "), " + helper.var_idx("problem_size_", self.b2b_num - 1) + ".n()},\n"
for i in range(self.b2b_num):
code += " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_name = helper.var_idx("Epilogue", i) + "_" + epilogue_arg[1]
code += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_name) + ")"
code += "},\n"
code += " " + "Batch};\n\n"
code += " " "b2b_gemm gemm_op;\n"
code += " " + "gemm_op.initialize(arguments);\n"
return code + "\n"
def gen_run(self):
code = " " + "gemm_op(stream);\n"
return code
def gen_wrapper(self):
code_body = ""
arg_lists = []
arg_lists.append(["int", "M"])
arg_lists.append(["int", "K0"])
arg_lists.append(["int", "Batch"])
arg_lists.append(["void*", helper.var_idx("A", 0)])
for i in range(self.b2b_num):
arg_lists.append(["void*", helper.var_idx("B", i)])
arg_lists.append(["void*", helper.var_idx("C", i)])
arg_lists.append(["void*", helper.var_idx("D", i)])
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
arg_lists.append([arg_tp, arg_name])
if self.b2b_num == 1:
code_body += self.gen_turing_unfused.gen_using(False) #False -> Turing, True -> Volta
code_body += self.gen_turing_unfused.gen_initialize()
code_body += self.gen_turing_unfused.gen_run()
else:
code_body += self.gen_using()
code_body += self.gen_initialize()
code_body += self.gen_run()
code = ir.gen_func(self.gen_class_name, arg_lists, code_body)
return code
def gen_code(self):
code = self.gen_wrapper()
helper.write_2_headfile("turing_impl.h", self.output_dir, self.user_header_file + "\n" + code)
class gen_volta_turing_fuse_act_impl:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name + "_volta_impl"
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
def perf_tiling(self, layer_mnk):
mnk = layer_mnk[:]
block_tile = mnk[:]
block_tile[2] = 32 # force the K tile to be 32
# M tile gen
block_tile[0] = 32
# N tile gen
if mnk[1] > 128:
block_tile[1] = 256
elif mnk[1] > 64:
block_tile[1] = 128
elif mnk[1] > 32:
block_tile[1] = 64
else :
block_tile[1] = 32
warp_tile = block_tile[:]
if block_tile[1] == 256:
warp_tile[1] = 64
elif block_tile[1] == 128:
warp_tile[1] = 32
elif block_tile[1] == 64:
warp_tile[1] = 32
else :
warp_tile[1] = 32
warp_tile[0] = 32
return block_tile, warp_tile
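    # Example (sanity check of the heuristic above): mnk = [1024, 96, 64] yields
    # block_tile = [32, 128, 32] and warp_tile = [32, 32, 32].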
def process_epilogue(self, epilogue_tp, n, C_tp, Acc_tp):
epilogue_setted_type = epilogue_tp
cutlass_epilogue_name = "LinearCombinationRelu"
if epilogue_setted_type.lower() == 'leakyrelu':
cutlass_epilogue_name = "LinearCombinationLeakyRelu"
elif epilogue_setted_type.lower() == 'identity':
cutlass_epilogue_name = "LinearCombination"
        n_mod_8 = n % 8
N_align_elements = 1
if n_mod_8 == 0:
N_align_elements = 8
elif n_mod_8 == 4:
N_align_elements = 4
elif n_mod_8 == 2 or n_mod_8 == 6:
N_align_elements = 2
epilogue_str = "cutlass::epilogue::thread::" + cutlass_epilogue_name+ "<" + C_tp + ", " + str(N_align_elements) + ", " + Acc_tp + ", " + Acc_tp + ">"
return epilogue_str
def gen_using(self, volta = True):
code_using = ""
volta_arch = "cutlass::arch::Sm70"
volta_tc = "cutlass::gemm::GemmShape<8, 8, 4>"
turing_arch = "cutlass::arch::Sm75"
turing_tc = "cutlass::gemm::GemmShape<16, 8, 8>"
arch = ""
tc = ""
if volta:
arch = volta_arch
tc = volta_tc
else:
arch = turing_arch
tc = turing_tc
for i in range(self.b2b_num):
k = self.fuse_gemm_info[i]['mnk'][2]
            k_mod_8 = k % 8
ab_ldm = 1
if k_mod_8 == 0:
ab_ldm = 8
elif k_mod_8 == 4:
ab_ldm = 4
elif k_mod_8 == 2 or k_mod_8 == 6:
ab_ldm = 2
block_tile, warp_tile = self.perf_tiling(self.fuse_gemm_info[i]['mnk'])
this_gemm_config = helper.var_idx("using Gemm", i) + " = cutlass::gemm::device::GemmBatched<\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + ",\n"
this_gemm_config += " " + "cutlass::arch::OpClassTensorOp,\n"
this_gemm_config += " " + arch + ",\n"
this_gemm_config += " " + "cutlass::gemm::GemmShape<" + str(block_tile[0]) + ", " + str(block_tile[1]) + ", " + str(block_tile[2]) + ">,\n"
this_gemm_config += " " + "cutlass::gemm::GemmShape<" + str(warp_tile[0]) + ", " + str(warp_tile[1]) + ", " + str(warp_tile[2]) + ">,\n"
this_gemm_config += " " + tc + ",\n"
this_gemm_config += " " + self.process_epilogue(helper.get_epilogue_tp(self.fuse_gemm_info[i]), self.fuse_gemm_info[i]['mnk'][1], helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']), helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp'])) + ",\n"
this_gemm_config += " " + "cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,\n"
this_gemm_config += " " + "2,\n"
this_gemm_config += " " + str(ab_ldm) + ",\n"
this_gemm_config += " " + str(ab_ldm) + ">;\n"
code_using += this_gemm_config + "\n"
return code_using + "\n"
def gen_initialize(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
N_str = str(self.fuse_gemm_info[i]['mnk'][1])
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " alpha", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(1);\n"
beta = "(1)"
if not helper.get_epilogue_add_bias_or_not(self.fuse_gemm_info[i]):
beta = "(0)"
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " beta", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + beta + ";\n"
k_str = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
k_str = "K0"
code_this += helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(M, " + str(self.fuse_gemm_info[i]['mnk'][1]) + ", " + k_str + ");\n"
code_this += helper.var_idx("typename Gemm", i) + helper.var_idx("::Arguments arguments_", i) + "{\n"
code_this += " " + helper.var_idx("problem_size_", i) + ",\n"
ldmA = k_str
ldmB = k_str
ldmC = str(self.fuse_gemm_info[i]['mnk'][1])
ldmBias = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
if self.fuse_gemm_info[i]['A_format'] == 'Col':
ldmA = "M"
if self.fuse_gemm_info[i]['B_format'] == 'Row':
ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
if self.fuse_gemm_info[i]['C_format'] == 'Col':
ldmC = "M"
if i == 0:
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("A", i) + "), " + ldmA + "}, " + "M * " + ldmA + ",\n"
else:
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("D", i - 1) + "), " + ldmA + "}, " + "M * " + ldmA + ",\n"
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("B", i) + "), " + ldmB + "}, " + N_str + " * " + ldmB + ",\n"
M_bias = str(helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])[0])
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("C", i) + "), " + ldmBias + "}, " + M_bias + " * " + N_str + ",\n"
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("D", i) + "), " + ldmC + "}, " + "M * " + ldmC + ",\n"
code_this += " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_name = helper.var_idx("Epilogue", i) + "_" + epilogue_arg[1]
code_this += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_name) + ")"
code_this += " },\n"
code_this += " " + "Batch};\n"
code_this += " " + helper.var_idx("Gemm", i) + helper.var_idx(" gemm_op_", i) + ";\n"
code_this += " " + helper.var_idx("gemm_op_", i) + helper.var_idx(".initialize(arguments_", i) + ", nullptr);\n"
code += code_this + "\n"
return code + "\n"
def gen_run(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
code_this += " " + helper.var_idx("gemm_op_", i) + "(stream);\n"
code += code_this
return code
def gen_wrapper(self):
code_body = ""
arg_lists = []
arg_lists.append(["int", "M"])
arg_lists.append(["int", "K0"])
arg_lists.append(["int", "Batch"])
arg_lists.append(["void*", helper.var_idx("A", 0)])
for i in range(self.b2b_num):
arg_lists.append(["void*", helper.var_idx("B", i)])
arg_lists.append(["void*", helper.var_idx("C", i)])
arg_lists.append(["void*", helper.var_idx("D", i)])
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
arg_lists.append([arg_tp, arg_name])
code_body += self.gen_using()
code_body += self.gen_initialize()
code_body += self.gen_run()
code = ir.gen_func(self.gen_class_name, arg_lists, code_body)
return code
def gen_code(self):
code = self.gen_wrapper()
helper.write_2_headfile("volta_impl.h", self.output_dir, self.user_header_file + "\n" + code)
class gen_one_API:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
self.gen_volta = gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
self.gen_turing = gen_turing_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
def gen_CUTLASS_irrelevant_API(self):
code = ""
code += "#include <cuda_runtime.h>\n"
code += "#include <assert.h>\n"
param_name = "Fused" + str(self.b2b_num) + "xGemm_"
for i in range(self.b2b_num):
param_name += str(self.fuse_gemm_info[i]['mnk'][1]) + "_"
param_name += "Params"
params = ""
params += " " + "int M;\n"
params += " " + "int K0;\n"
params += " " + "int Batch;\n"
params += " " + "const void* A0;\n"
for i in range(self.b2b_num):
params += " " + "const void* " + helper.var_idx("B", i) + ";\n"
params += " " + "const void* " + helper.var_idx("C", i) + ";\n"
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
params += " " + arg_tp + " " + arg_name + ";\n"
params += " " + "void* " + helper.var_idx("D", i) + ";\n"
code += ir.gen_struct(param_name, params)
code += "using Param = " + param_name + ";\n"
code += "void one_api( const Param & param, int sm, cudaStream_t stream);\n"
return code
def gen_one_api(self):
code = ""
code += "/* Auto Generated code - Do not edit.*/\n"
code += "#include \"cutlass_irrelevant.h\"\n"
code += "#include \"api.h\"\n"
code += "void one_api( const Param & param, int sm, cudaStream_t stream) {\n"
code += " " + "if (sm == 70) \n"
code += " " + " " + self.gen_class_name + "_volta_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0), "
for i in range(self.b2b_num):
code += helper.var_idx("const_cast<void*>(param.B", i) + "), "
code += helper.var_idx("const_cast<void*>(param.C", i) + "), "
code += helper.var_idx("param.D", i) + ", "
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
code += "param." + arg_name + ", "
code += "stream);\n"
code += " " + "else if(sm >= 75) \n"
code += " " + " " + self.gen_class_name + "_turing_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0), "
for i in range(self.b2b_num):
code += helper.var_idx("const_cast<void*>(param.B", i) + "), "
code += helper.var_idx("const_cast<void*>(param.C", i) + "), "
code += helper.var_idx("param.D", i) + ", "
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
code += "param." + arg_name + ", "
code += "stream);\n"
code += " " + "else assert(0);\n"
code += "}\n"
return code
def gen_code(self):
turing_code = self.gen_turing.gen_wrapper()
volta_code = self.gen_volta.gen_wrapper()
cutlass_irrelevant_code = self.gen_CUTLASS_irrelevant_API()
one_api_code = self.gen_one_api()
with open(self.output_dir + "one_api.cu", "w+") as f:
f.write(one_api_code)
helper.write_2_headfile("cutlass_irrelevant.h", self.output_dir, cutlass_irrelevant_code)
helper.write_2_headfile("api.h", self.output_dir, self.user_header_file + "\n" + turing_code + volta_code)
|
cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_turing_and_volta.py/0
|
{
"file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_turing_and_volta.py",
"repo_id": "cutlass",
"token_count": 10964
}
| 15 |
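For orientation, the following is a hedged sketch of the kind of file gen_one_API.gen_one_api() emits, reconstructed from the string builders above for a hypothetical single-GEMM configuration (b2b_num == 1, base class name "FusedGemm", N = 64, one illustrative epilogue argument "Epilogue0_leaky_alpha"). The real output depends entirely on fuse_gemm_info, and the helper/ir modules supply the actual type spellings; nothing below is taken verbatim from generated output.

/* Auto Generated code - Do not edit.*/
// Hypothetical output of gen_CUTLASS_irrelevant_API() + gen_one_api() for b2b_num == 1.
// cutlass_irrelevant.h (emitted separately) pulls in <cuda_runtime.h> and <assert.h> and
// would contain a struct shaped roughly like:
//   struct Fused1xGemm_64_Params {
//     int M; int K0; int Batch;
//     const void* A0;
//     const void* B0; const void* C0;
//     float Epilogue0_leaky_alpha;   // epilogue argument name is illustrative
//     void* D0;
//   };
//   using Param = Fused1xGemm_64_Params;
#include "cutlass_irrelevant.h"
#include "api.h"
void one_api( const Param & param, int sm, cudaStream_t stream) {
  if (sm == 70)
    FusedGemm_volta_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0),
                         const_cast<void*>(param.B0), const_cast<void*>(param.C0), param.D0,
                         param.Epilogue0_leaky_alpha, stream);
  else if (sm >= 75)
    FusedGemm_turing_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0),
                          const_cast<void*>(param.B0), const_cast<void*>(param.C0), param.D0,
                          param.Epilogue0_leaky_alpha, stream);
  else assert(0);
}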
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/threadblock/mma_base.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// B1-specific version of the policy (concept: MmaPolicy)
typename Policy1_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class DualMmaBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Policy describing tuning details
using Policy0 = Policy0_;
using Policy1 = Policy1_;
//
// Dependent types
//
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
using Operator1 = typename Policy1::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy0::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
/// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator0::Policy::MmaShape::kK);
/// Number of stages
static int const kStages = Stages;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator0::ElementA, typename Operator0::LayoutA>;
/// Tensor reference to the B operand
using TensorRefB0 = TensorRef<typename Operator0::ElementB, typename Operator0::LayoutB>;
using TensorRefB1 = TensorRef<typename Operator1::ElementB, typename Operator1::LayoutB>;
static_assert(kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert((kWarpGemmIterations % 2) == 0,
"Inner loop iteration must be an even number.");
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy0::SmemPaddingA::kRow,
Shape::kK * kStages +
Policy0::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB0 =
MatrixShape<Shape::kK * kStages + Policy0::SmemPaddingB::kRow,
Shape::kN + Policy0::SmemPaddingB::kColumn>;
using ShapeB1 =
MatrixShape<Shape::kK * kStages + Policy1::SmemPaddingB::kRow,
Shape::kN + Policy1::SmemPaddingB::kColumn>;
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator0::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator0::ElementB, ShapeB0::kCount> operand_B0;
AlignedBuffer<typename Operator1::ElementB, ShapeB1::kCount> operand_B1;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator0::LayoutA LayoutA() {
return Operator0::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator0::LayoutB LayoutB0() {
return Operator0::LayoutB::packed({ShapeB0::kRow, ShapeB0::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator1::LayoutB LayoutB1() {
return Operator1::LayoutB::packed({ShapeB1::kRow, ShapeB1::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB0 operand_B0_ref() {
return TensorRefB0{operand_B0.data(), LayoutB0()};
}
CUTLASS_HOST_DEVICE
TensorRefB1 operand_B1_ref() {
return TensorRefB1{operand_B1.data(), LayoutB1()};
}
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator0::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator0::IteratorB warp_tile_iterator_B0_;
typename Operator1::IteratorB warp_tile_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
DualMmaBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_B0_(shared_storage.operand_B0_ref(), lane_idx),
warp_tile_iterator_B1_(shared_storage.operand_B1_ref(), lane_idx) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/examples/45_dual_gemm/threadblock/dual_mma_base.h/0
|
{
"file_path": "cutlass/examples/45_dual_gemm/threadblock/dual_mma_base.h",
"repo_id": "cutlass",
"token_count": 2711
}
| 16 |
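As a quick sanity check on the arithmetic encoded in DualMmaBase above, here is a standalone C++ sketch (tile sizes are illustrative assumptions, not taken from the file) of how WarpCount and kWarpGemmIterations fall out of the threadblock tile, the warp tile, and the per-instruction K extent.

#include <cstdio>

int main() {
  // Hypothetical Shape (threadblock tile), WarpGemm (warp tile) and Operator0::Policy::MmaShape::kK.
  constexpr int ShapeM = 128, ShapeN = 128, ShapeK = 32;
  constexpr int WarpM = 64, WarpN = 64, WarpK = 32;
  constexpr int MmaK = 8;

  // Mirrors WarpCount = GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>.
  constexpr int WarpCountM = ShapeM / WarpM;  // 2
  constexpr int WarpCountN = ShapeN / WarpN;  // 2
  constexpr int WarpCountK = ShapeK / WarpK;  // 1

  // Mirrors kWarpGemmIterations = WarpGemm::kK / Operator0::Policy::MmaShape::kK.
  constexpr int kWarpGemmIterations = WarpK / MmaK;  // 4

  // The pipelined mainloop requires more than one iteration and an even count (see the static_asserts above).
  static_assert(kWarpGemmIterations > 1 && kWarpGemmIterations % 2 == 0, "invalid tiling");

  std::printf("warps per CTA: %d x %d x %d, warp-level MMA iterations per tile: %d\n",
              WarpCountM, WarpCountN, WarpCountK, kWarpGemmIterations);
  return 0;
}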
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Example of a Hopper gather+GEMM+scatter kernel fusion.
This example fuses gather before GEMM and scatter after GEMM into the same
GEMM kernel. Gather and scatter operations are controlled by an index vector
to select rows or columns from A, B, C or D matrices.
Gather/scatter operations are always performed along a strided dimension
in order to preserve vectorized loads/stores. Thus the index vector is
applied to rows of row-major matrices and columns of column-major matrices.
Note that the index vector must contain integers in range [0,X) where
X is one of (M,N,K), depending on selected gather dimension. The problem
shape given to the GEMM kernel must consist of matrix sizes AFTER gather
and BEFORE scatter operations are applied.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <random>
#include <numeric>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
#include "gather_gemm.hpp"
#include "gather_kernel.cuh"
#include "scatter_epilogue.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
using namespace cute;
namespace example {
// Command line options parsing
struct Options {
bool help = false;
cutlass::gemm::BatchedGemmCoord problem_size = {2048, 2048, 2048, 1};
int index_size = 1024;
int mode = 1; // N-mode gather/scatter by default
float alpha = 1.0f;
float beta = 0.0f;
bool reference_check = true;
int iterations = 20;
bool valid() const {
return problem_size.m() > 0
&& problem_size.n() > 0
&& problem_size.k() > 0
&& problem_size.batch() > 0
&& 0 <= mode && mode < 3
&& index_size <= problem_size.at(mode)
&& iterations > 0;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch_size", problem_size.batch());
cmd.get_cmd_line_argument("index_size", index_size);
char const modes[] = {'m', 'n', 'k'};
char mode_input = modes[mode];
cmd.get_cmd_line_argument("mode", mode_input);
mode = int(std::distance(std::begin(modes), std::find(std::begin(modes), std::end(modes), mode_input)));
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("check", reference_check, true);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out <<
"52_hopper_gather_scatter_fusion example\n"
"\n"
" This example uses the CUTLASS Library to fuse gather/scatter of input/output tensors with GEMM.\n"
" It validates and benchmarks the fused kernel against an unfused implementation that executes\n"
" gather+GEMM+scatter in sequence and writes intermediate (gathered) tensors to memory.\n"
" For the unfused implementation two GEMM kernels are considered: default one that uses the same\n"
" schedule and instruction set as the fused one, and an optimized one that utilizes advanced\n"
" features (such as TMA units) that cannot be used by the fused kernel due to hardware constraints."
"\n"
"Options:\n"
" --help If specified, displays this usage statement.\n"
" --m=<int> GEMM M dimension\n"
" --n=<int> GEMM N dimension\n"
" --k=<int> GEMM K dimension\n"
" --batch_size=<int> GEMM batch size\n"
" --index_size=<int> Size of N dimension gather/scatter index\n"
" --mode=<m,n,k> Gather mode (M, N, or K)\n"
" --alpha=<float> GEMM alpha parameter\n"
" --beta=<float> GEMM beta parameter\n"
" --iterations=<int> Number of profiling iterations to perform.\n"
"\n"
"Examples:\n"
"\n"
"$ ./examples/52_hopper_gather_scatter_fusion/52_hopper_gather_scatter_fusion --m=1024 --n=2048 --k=1024 --mode=n --index_size=1024\n";
return out;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class ElementA, class LayoutA, class GatherA,
class ElementB, class LayoutB, class GatherB,
class ElementC, class LayoutC, class GatherC,
class ElementD, class LayoutD, class ScatterD,
class ElementAccumulator, class ElementComputeEpilogue>
struct ExampleRunner
{
// Useful aliases
using ProblemShape = Shape<int,int,int,int>;
using StrideA = cutlass::gemm::TagToStrideA_t<LayoutA>;
using StrideB = cutlass::gemm::TagToStrideB_t<LayoutB>;
using StrideC = cutlass::gemm::TagToStrideC_t<LayoutC>;
using StrideD = cutlass::gemm::TagToStrideC_t<LayoutD>;
// Alias for the epilogue type that supports gather/scatter
using Epilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::EpilogueGatherScatter<
StrideC, StrideD,
cutlass::epilogue::thread::LinearCombination<
ElementD, 1,
ElementAccumulator, ElementComputeEpilogue,
cutlass::epilogue::thread::ScaleType::Default,
cutlass::FloatRoundStyle::round_to_nearest, ElementC
>,
cutlass::gemm::EpilogueDefault,
GatherC,
ScatterD
>
>;
// Alias for the mainloop type
using Mainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, LayoutB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
Shape<_128,_128,_64>,
Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelCpAsyncWarpSpecialized
>::CollectiveOp;
using Kernel = cutlass::gemm::kernel::GemmGather<
ProblemShape,
Mainloop,
Epilogue,
void,
GatherA,
GatherB
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<Kernel>;
static constexpr bool DoGatherA = not cutlass::platform::is_same<GatherA, NoGather>::value;
static constexpr bool DoGatherB = not cutlass::platform::is_same<GatherB, NoGather>::value;
static constexpr bool DoGatherC = not cutlass::platform::is_same<GatherC, NoGather>::value;
static constexpr bool DoScatterD = not cutlass::platform::is_same<ScatterD, NoGather>::value;
static constexpr bool GatherAonM = DoGatherA && cutlass::platform::is_same<LayoutA,cutlass::layout::RowMajor>::value;
static constexpr bool GatherAonK = DoGatherA && cutlass::platform::is_same<LayoutA,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherBonN = DoGatherB && cutlass::platform::is_same<LayoutB,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherBonK = DoGatherB && cutlass::platform::is_same<LayoutB,cutlass::layout::RowMajor>::value;
static constexpr bool GatherConM = DoGatherC && cutlass::platform::is_same<LayoutC,cutlass::layout::RowMajor>::value;
static constexpr bool GatherConN = DoGatherC && cutlass::platform::is_same<LayoutC,cutlass::layout::ColumnMajor>::value;
static constexpr bool ScatterDonM = DoScatterD && cutlass::platform::is_same<LayoutD,cutlass::layout::RowMajor>::value;
static constexpr bool ScatterDonN = DoScatterD && cutlass::platform::is_same<LayoutD,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherModeM = GatherAonM || GatherConM || ScatterDonM;
static constexpr bool GatherModeN = GatherBonN || GatherConN || ScatterDonN;
static constexpr bool GatherModeK = GatherAonK || GatherBonK;
static_assert( GatherModeM && !GatherModeN && !GatherModeK ||
!GatherModeM && GatherModeN && !GatherModeK ||
!GatherModeM && !GatherModeN && GatherModeK,
"Only one gather mode (M, N or K) is supported by example runner");
// Construct a reference (non-gather) GEMM kernel type
using MainloopRef = Mainloop;
using EpilogueRef = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::DefaultEpilogue<
StrideC, StrideD,
typename Epilogue::ThreadEpilogueOp,
typename Epilogue::EpilogueSchedule
>
>;
using KernelRef = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
MainloopRef,
EpilogueRef,
void
>;
using GemmRef = cutlass::gemm::device::GemmUniversalAdapter<KernelRef>;
// Construct an optimized reference GEMM kernel type (using TMA)
using EpilogueOpt = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_128,_128,_64>,
Shape<_2,_2,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementComputeEpilogue,
ElementC, LayoutC, 128 / cutlass::sizeof_bits<ElementC>::value,
ElementD, LayoutD, 128 / cutlass::sizeof_bits<ElementD>::value,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using MainloopOpt = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, LayoutB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
Shape<_128,_128,_64>,
Shape<_2,_2,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename EpilogueOpt::SharedStorage))>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using KernelOpt = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
MainloopOpt,
EpilogueOpt,
void
>;
using GemmOpt = cutlass::gemm::device::GemmUniversalAdapter<KernelOpt>;
// Data members
cutlass::gemm::BatchedGemmCoord problem_size_orig;
cutlass::gemm::BatchedGemmCoord problem_size;
ProblemShape problem_shape_orig;
ProblemShape problem_shape;
cutlass::KernelHardwareInfo hw_info;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
StrideA stride_A_orig;
StrideB stride_B_orig;
StrideC stride_C_orig;
StrideD stride_D_orig;
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
cutlass::device_memory::allocation<ElementA> tensor_a;
cutlass::device_memory::allocation<ElementB> tensor_b;
cutlass::device_memory::allocation<ElementC> tensor_c;
cutlass::device_memory::allocation<ElementD> tensor_d;
cutlass::device_memory::allocation<int> gather_indices;
cutlass::device_memory::allocation<ElementA> tensor_a_gathered;
cutlass::device_memory::allocation<ElementB> tensor_b_gathered;
cutlass::device_memory::allocation<ElementC> tensor_c_gathered;
cutlass::device_memory::allocation<ElementD> tensor_d_gathered;
cutlass::device_memory::allocation<ElementD> tensor_d_reference;
cutlass::gemm::GemmUniversalMode gemm_mode;
Gemm gemm;
typename Gemm::Arguments arguments;
cutlass::device_memory::allocation<uint8_t> workspace;
GemmRef gemm_ref;
typename GemmRef::Arguments arguments_ref;
cutlass::device_memory::allocation<uint8_t> workspace_ref;
GemmOpt gemm_opt;
typename GemmOpt::Arguments arguments_opt;
cutlass::device_memory::allocation<uint8_t> workspace_opt;
ExampleRunner(Options const &options, cutlass::KernelHardwareInfo const &hw_info)
: problem_size_orig(options.problem_size),
problem_size(GatherModeM ? options.index_size : problem_size_orig.m(),
GatherModeN ? options.index_size : problem_size_orig.n(),
GatherModeK ? options.index_size : problem_size_orig.k(),
problem_size_orig.batch()),
problem_shape_orig(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.k(), problem_size_orig.batch()),
problem_shape(problem_size.m(), problem_size.n(), problem_size.k(), problem_size.batch()),
hw_info(hw_info),
alpha(options.alpha),
beta(options.beta),
stride_A_orig(cutlass::make_cute_packed_stride(
StrideA{}, make_shape(problem_size_orig.m(), problem_size_orig.k(), problem_size_orig.batch()))),
stride_B_orig(cutlass::make_cute_packed_stride(
StrideB{}, make_shape(problem_size_orig.n(), problem_size_orig.k(), problem_size_orig.batch()))),
stride_C_orig(cutlass::make_cute_packed_stride(
StrideC{}, make_shape(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.batch()))),
stride_D_orig(cutlass::make_cute_packed_stride(
StrideD{}, make_shape(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.batch()))),
stride_A(cutlass::make_cute_packed_stride(
StrideA{}, make_shape(problem_size.m(), problem_size.k(), problem_size.batch()))),
stride_B(cutlass::make_cute_packed_stride(
StrideB{}, make_shape(problem_size.n(), problem_size.k(), problem_size.batch()))),
stride_C(cutlass::make_cute_packed_stride(
StrideC{}, make_shape(problem_size.m(), problem_size.n(), problem_size.batch()))),
stride_D(cutlass::make_cute_packed_stride(
StrideD{}, make_shape(problem_size.m(), problem_size.n(), problem_size.batch()))),
tensor_a(problem_size_orig.m() * problem_size_orig.k() * problem_size_orig.batch()),
tensor_b(problem_size_orig.k() * problem_size_orig.n() * problem_size_orig.batch()),
tensor_c(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
tensor_d(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
gather_indices(options.index_size),
tensor_a_gathered(problem_size.m() * problem_size.k() * problem_size_orig.batch()),
tensor_b_gathered(problem_size.k() * problem_size.n() * problem_size_orig.batch()),
tensor_c_gathered(problem_size.m() * problem_size.n() * problem_size_orig.batch()),
tensor_d_gathered(problem_size.m() * problem_size.n() * problem_size_orig.batch()),
tensor_d_reference(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
gemm_mode(problem_size.batch() > 1 ? cutlass::gemm::GemmUniversalMode::kBatched : cutlass::gemm::GemmUniversalMode::kGemm),
gemm(),
// When constructing arguments for gather/scatter gemm, we must pass stride arguments
// made for the original (non-gathered) problem size, because they are used to access
// tensors of the original shape. However we still use the reduced (gathered) problem
// shape since it corresponds to the logical indexing in reduced size GEMM.
arguments{
gemm_mode,
problem_shape,
{
tensor_a.get(),
stride_A_orig,
tensor_b.get(),
stride_B_orig
},
{
{ alpha, beta },
tensor_c.get(), stride_C_orig,
tensor_d.get(), stride_D_orig,
typename Epilogue::GatherC {gather_indices.get()},
typename Epilogue::ScatterD{gather_indices.get()}
},
hw_info,
{},
typename Kernel::GatherA{gather_indices.get()},
typename Kernel::GatherB{gather_indices.get()}
},
workspace(Gemm::get_workspace_size(arguments)),
gemm_ref(),
arguments_ref{
gemm_mode,
problem_shape,
{
DoGatherA ? tensor_a_gathered.get() : tensor_a.get(),
stride_A,
DoGatherB ? tensor_b_gathered.get() : tensor_b.get(),
stride_B
},
{
{ alpha, beta },
DoGatherC ? tensor_c_gathered.get() : tensor_c.get(),
stride_C,
DoScatterD ? tensor_d_gathered.get() : tensor_d_reference.get(),
stride_D
},
hw_info
},
workspace_ref(GemmRef::get_workspace_size(arguments_ref)),
gemm_opt(),
arguments_opt{
gemm_mode,
problem_shape,
{
DoGatherA ? tensor_a_gathered.get() : tensor_a.get(),
stride_A,
DoGatherB ? tensor_b_gathered.get() : tensor_b.get(),
stride_B
},
{
{ alpha, beta },
DoGatherC ? tensor_c_gathered.get() : tensor_c.get(),
stride_C,
DoScatterD ? tensor_d_gathered.get() : tensor_d_reference.get(),
stride_D
},
hw_info
},
workspace_opt(GemmOpt::get_workspace_size(arguments_opt))
{
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::device::BlockFillRandomUniform(tensor_a.get(), tensor_a.size(), 1, ElementA(7), ElementA(-8), 0);
cutlass::reference::device::BlockFillRandomUniform(tensor_b.get(), tensor_b.size(), 1, ElementB(7), ElementB(-8), 0);
cutlass::reference::device::BlockFillRandomUniform(tensor_c.get(), tensor_c.size(), 1, ElementC(7), ElementC(-8), 0);
cutlass::reference::device::BlockFillSequential(tensor_d.get(), tensor_d.size(), ElementD(0), ElementD(0));
// <- Fill gather_indices with unique random integers in range [0,n)
int index_range = GatherModeM ? problem_size_orig.m() : (GatherModeN ? problem_size_orig.n() : problem_size_orig.k());
std::vector<int> indices(index_range);
std::iota(indices.begin(), indices.end(), 0);
{ // std::random_shuffle was deprecated in C++14 and removed in C++17
std::random_device make_seed;
std::mt19937 source_of_randomness(make_seed());
std::shuffle(indices.begin(), indices.end(), source_of_randomness);
}
gather_indices.copy_from_host(indices.data());
auto const gemm_init = [](auto & gemm, auto const & arguments, auto & workspace)
{
cutlass::Status status = gemm.can_implement(arguments);
CUTLASS_CHECK(status);
status = gemm.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
};
gemm_init(gemm, arguments, workspace );
gemm_init(gemm_ref, arguments_ref, workspace_ref);
gemm_init(gemm_opt, arguments_opt, workspace_opt);
}
void debug_output(std::ostream & os)
{
auto print_tensor = [](std::ostream &os, char const * name, auto const & data, auto shape, auto stride)
{
std::vector<remove_cvref_t<decltype(*data.get())>> h_data(data.size());
data.copy_to_host(h_data.data());
Tensor t = make_tensor(h_data.data(), shape, stride);
os << "\n" << name << ": " << std::setw(4) << t << std::endl;
};
{
auto [M,N,K,L] = problem_shape_orig;
print_tensor(os, "A", tensor_a, make_shape(M,K,L), stride_A_orig);
print_tensor(os, "B", tensor_b, make_shape(N,K,L), stride_B_orig);
print_tensor(os, "C", tensor_c, make_shape(M,N,L), stride_C_orig);
print_tensor(os, "D", tensor_d, make_shape(M,N,L), stride_D_orig);
print_tensor(os, "D reference", tensor_d_reference, make_shape(M,N,L), stride_D_orig);
print_tensor(os, "indices", gather_indices, make_shape(gather_indices.size()), make_stride(_1{}));
}
}
template<class Gemm2>
static void run_gemm(Gemm2 &gemm)
{
cutlass::Status status = gemm.run();
CUTLASS_CHECK(status);
}
template<class Gemm2>
void run_reference(Gemm2 &gemm)
{
// Convenience wrapper around calls to separate gather/scatter kernels
auto run_gather = [this](auto call, auto const & input, auto & output, auto gather_func, auto batch_size, auto stride)
{
[[maybe_unused]] auto idx = find_if(stride, [](auto x){ return not is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
call(input.get(),
output.get(),
gather_func,
batch_size,
static_cast<int>(input.size() / batch_size),
static_cast<int>(output.size() / batch_size),
static_cast<int>(get<I>(stride)),
hw_info);
};
// Forward calls via lambda to avoid specifying template arguments
auto gather_call = [](auto&&... args){ gather(static_cast<decltype(args)&&>(args)...); };
// MSVC doesn't count a use inside a discarded "if constexpr" branch as a use.
[[maybe_unused]] auto scatter_call = [](auto&&... args){ scatter(static_cast<decltype(args)&&>(args)...); };
if constexpr (DoGatherA) {
run_gather(gather_call, tensor_a, tensor_a_gathered, arguments.gather_A, problem_size.batch(), stride_A);
}
if constexpr (DoGatherB) {
run_gather(gather_call, tensor_b, tensor_b_gathered, arguments.gather_B, problem_size.batch(), stride_B);
}
if constexpr (DoGatherC) {
if (beta != ElementComputeEpilogue(0)) {
run_gather(gather_call, tensor_c, tensor_c_gathered, arguments.epilogue.gather_C, problem_size.batch(), stride_C);
}
}
run_gemm(gemm);
if constexpr (DoScatterD) {
run_gather(scatter_call, tensor_d_gathered, tensor_d_reference, arguments.epilogue.scatter_D, problem_size.batch(), stride_D);
}
}
bool verify()
{
run_gemm(gemm);
run_reference(gemm_ref);
cudaDeviceSynchronize();
return cutlass::reference::device::BlockCompareEqual(tensor_d.get(), tensor_d_reference.get(), tensor_d.size());
}
bool run(Options const &options)
{
if (options.reference_check) {
if (!verify()) {
std::cout << "Failed validation" << std::endl;
#if 0
debug_output(std::cout);
#endif
return false;
}
else {
std::cout << "Passed validation" << std::endl;
}
}
//
// Run profiling loop
//
auto const benchmark = [&](auto name, auto func)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
func();
}
timer.stop();
double runtime = timer.elapsed_millis() / double(options.iterations);
double gflops = 2 * double(problem_size.product()) / 1e6 / runtime; // Two flops per multiply-add
std::cout << name << ":\n";
std::cout << " Runtime: " << runtime << " ms\n";
std::cout << " GFLOPs: " << gflops << "\n";
};
benchmark("Fused", [&](){ run_gemm(gemm); });
benchmark("Unfused default", [&](){ run_reference(gemm_ref); });
benchmark("Unfused optimized", [&](){ run_reference(gemm_opt); });
return true;
}
};
} // namespace example
int main(int argc, const char ** argv) {
bool notSupported = false;
// CUDA 12 minimum required
if (__CUDACC_VER_MAJOR__ < 12) {
std::cerr << "This example requires CUDA Toolkit version 12 or later.\n";
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (props.major < 9) {
std::cerr << "This example requires a device with compute capability 90 or higher.\n";
notSupported = true;
}
if (notSupported) {
return EXIT_SUCCESS; // Do not fail CI checks on unsupported systems
}
example::Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << "\n";
return EXIT_SUCCESS;
}
if (!options.valid()) {
std::cerr << "Invalid arguments." << "\n";
return EXIT_FAILURE;
}
cutlass::KernelHardwareInfo hw_info;
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
bool result = true;
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
switch (options.mode) {
using namespace example;
case 0: {
std::cout << "Gather A,C + scatter D on M mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // A
cutlass::half_t, cutlass::layout::ColumnMajor, NoGather, // B
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // C
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
case 1: {
std::cout << "Gather B,C + scatter D on N mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // A
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // B
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // C
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
case 2: {
std::cout << "Gather A,B on K mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // A
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // B
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // C
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
}
#endif
return result ? EXIT_SUCCESS : EXIT_FAILURE;
}
|
cutlass/examples/52_hopper_gather_scatter_fusion/52_hopper_gather_scatter_fusion.cu/0
|
{
"file_path": "cutlass/examples/52_hopper_gather_scatter_fusion/52_hopper_gather_scatter_fusion.cu",
"repo_id": "cutlass",
"token_count": 10782
}
| 17 |
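To make the gather semantics in the example above concrete, here is a small host-side C++ sketch with no CUTLASS dependency (names and sizes are illustrative): for a row-major matrix the index vector selects whole rows, so each gathered row is a contiguous copy of a source row, which is the "strided dimension" rule stated in the file's opening comment that keeps loads and stores vectorizable.

#include <cstdio>
#include <vector>

// Gather whole rows of a row-major (rows x cols) matrix according to `indices`.
// Each indices[i] must lie in [0, rows); output row i is a contiguous copy of input row indices[i].
static std::vector<float> gather_rows(std::vector<float> const& src, int rows, int cols,
                                      std::vector<int> const& indices) {
  std::vector<float> dst(indices.size() * static_cast<std::size_t>(cols));
  for (std::size_t i = 0; i < indices.size(); ++i) {
    for (int c = 0; c < cols; ++c) {
      dst[i * cols + c] = src[static_cast<std::size_t>(indices[i]) * cols + c];
    }
  }
  (void)rows;  // bound of valid index values; checked by the caller in this sketch
  return dst;
}

int main() {
  int rows = 4, cols = 2;                        // tiny illustrative A matrix (M x K, row-major)
  std::vector<float> A = {0, 1, 10, 11, 20, 21, 30, 31};
  std::vector<int> idx = {2, 0};                 // gathered problem has M' = 2 rows
  auto Ag = gather_rows(A, rows, cols, idx);
  for (float v : Ag) std::printf("%g ", v);      // prints: 20 21 0 1
  std::printf("\n");
  return 0;
}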
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Hopper Ptr-Array Batched GEMM example using CUTLASS 3 APIs for NVIDIA Hopper architecture.
This example demonstrates an implementation of Ptr-Array Batched GEMM using a TMA + GMMA
warp-specialized cooperative kernel.
The new feature showcased in this example is on-the-fly modification of TMA descriptors
to move between batches (represented by l).
To run this example:
$ ./examples/56_hopper_ptr_array_batched_gemm/56_hopper_ptr_array_batched_gemm --m=2048 --n=2048 --k=2048 --l=10
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/group_array_problem_shape.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "helper.h"
using namespace cute;
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM kernel configurations
/////////////////////////////////////////////////////////////////////////////////////////////////
// A matrix configuration
using ElementA = cutlass::half_t; // Element type for A matrix operand
using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand
constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes)
// B matrix configuration
using ElementB = cutlass::half_t; // Element type for B matrix operand
using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand
constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes)
// C/D matrix configuration
using ElementC = cutlass::half_t; // Element type for C and D matrix operands
using LayoutC = cutlass::layout::ColumnMajor; // Layout type for C and D matrix operands
constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrix in units of elements (up to 16 bytes)
// Core kernel configurations
using ElementAccumulator = float; // Element type for internal accumulation
using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature
using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag
using TileShape = Shape<_256,_128,_64>; // Threadblock-level tile size
using ClusterShape = Shape<_1,_2,_1>; // Shape of the threadblocks in a cluster
using StageCountType = cutlass::gemm::collective::StageCountAuto; // Stage count maximized based on the tile size
using KernelSchedule = cutlass::gemm::KernelPtrArrayTmaWarpSpecializedCooperative; // Kernel to launch
using EpilogueSchedule = cutlass::epilogue::PtrArrayNoSmemWarpSpecialized; // Epilogue to launch
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape, ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementAccumulator,
ElementC, LayoutC, AlignmentC,
ElementC, LayoutC, AlignmentC,
EpilogueSchedule
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
ArchTag, OperatorClass,
ElementA, LayoutA, AlignmentA,
ElementB, LayoutB, AlignmentB,
ElementAccumulator,
TileShape, ClusterShape,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
KernelSchedule
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
cutlass::gemm::ArrayProblemShape<Shape<int,int,int,int>>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Reference device GEMM implementation type
using DeviceGemmReference = cutlass::reference::device::Gemm<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
ElementAccumulator>;
using StrideA = typename Gemm::GemmKernel::StrideA;
using StrideB = typename Gemm::GemmKernel::StrideB;
using StrideC = typename Gemm::GemmKernel::StrideC;
using StrideD = typename Gemm::GemmKernel::StrideD;
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
uint64_t seed;
std::vector<int64_t> offset_A;
std::vector<int64_t> offset_B;
std::vector<int64_t> offset_C;
std::vector<int64_t> offset_D;
cutlass::DeviceAllocation<typename Gemm::ElementA> block_A;
cutlass::DeviceAllocation<typename Gemm::ElementB> block_B;
cutlass::DeviceAllocation<typename Gemm::ElementC> block_C;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_D;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_ref_D;
cutlass::DeviceAllocation<const typename Gemm::ElementA *> ptr_A;
cutlass::DeviceAllocation<const typename Gemm::ElementB *> ptr_B;
cutlass::DeviceAllocation<const typename Gemm::ElementC *> ptr_C;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput *> ptr_D;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput *> ptr_ref_D;
#endif // defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Testbed utility types
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help = false;
float alpha = 1.0f;
float beta = 0.0f;
int iterations = 10;
int m = 1024, n = 512, k = 1024, l = 10;
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("m", m);
cmd.get_cmd_line_argument("n", n);
cmd.get_cmd_line_argument("k", k);
cmd.get_cmd_line_argument("l", l);
cmd.get_cmd_line_argument("alpha", alpha, 1.f);
cmd.get_cmd_line_argument("beta", beta, 0.f);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "56_hopper_ptr_array_batched_gemm\n\n"
<< " Hopper FP32 GEMM using a Warp Specialized kernel.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement\n\n"
<< " --m=<int> Sets the M extent of the GEMM\n"
<< " --n=<int> Sets the N extent of the GEMM\n"
<< " --k=<int> Sets the K extent of the GEMM\n"
<< " --l=<int> Sets the batch count for Ptr-Array GEMM\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --iterations=<int> Number of profiling iterations to perform\n\n";
out
<< "\n\nExamples:\n\n"
<< "$ " << "56_hopper_ptr_array_batched_gemm" << " --m=1024 --n=512 --k=1024 --l=10 --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const
{
// Two flops per multiply-add
uint64_t flop = uint64_t(2) * m * n * k * l;
double gflop = double(flop) / double(1.0e9);
return gflop / runtime_s;
}
};
/// Result structure
struct Result
{
double avg_runtime_ms = 0.0;
double gflops = 0.0;
cutlass::Status status = cutlass::Status::kSuccess;
cudaError_t error = cudaSuccess;
bool passed = false;
};
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM setup and evaluation
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to initialize a block of device data
template <class Element>
bool initialize_block(
cutlass::DeviceAllocation<Element>& block,
uint64_t seed=2023) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
block.get(), block.size(), seed, scope_max, scope_min, 0);
return true;
}
/// Allocates device-side data
void allocate(const Options &options) {
int64_t total_elements_A = 0;
int64_t total_elements_B = 0;
int64_t total_elements_C = 0;
int64_t total_elements_D = 0;
for (int32_t i = 0; i < options.l; ++i) {
offset_A.push_back(total_elements_A);
offset_B.push_back(total_elements_B);
offset_C.push_back(total_elements_C);
offset_D.push_back(total_elements_D);
int64_t elements_A = options.m * options.k;
int64_t elements_B = options.k * options.n;
int64_t elements_C = options.m * options.n;
int64_t elements_D = options.m * options.n;
total_elements_A += elements_A;
total_elements_B += elements_B;
total_elements_C += elements_C;
total_elements_D += elements_D;
}
block_A.reset(total_elements_A);
block_B.reset(total_elements_B);
block_C.reset(total_elements_C);
block_D.reset(total_elements_D);
block_ref_D.reset(total_elements_D);
}
/// Initialize operands to be used in the GEMM and reference GEMM
void initialize(const Options &options) {
stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(options.m, options.k, options.l));
stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(options.n, options.k, options.l));
stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(options.m, options.n, options.l));
stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(options.m, options.n, options.l));
//
// Assign pointers
//
std::vector<ElementA *> ptr_A_host(options.l);
std::vector<ElementB *> ptr_B_host(options.l);
std::vector<ElementC *> ptr_C_host(options.l);
std::vector<ElementC *> ptr_D_host(options.l);
for (int32_t i = 0; i < options.l; ++i) {
ptr_A_host.at(i) = block_A.get() + offset_A.at(i);
ptr_B_host.at(i) = block_B.get() + offset_B.at(i);
ptr_C_host.at(i) = block_C.get() + offset_C.at(i);
ptr_D_host.at(i) = block_D.get() + offset_D.at(i);
}
ptr_A.reset(options.l);
ptr_A.copy_from_host(ptr_A_host.data());
ptr_B.reset(options.l);
ptr_B.copy_from_host(ptr_B_host.data());
ptr_C.reset(options.l);
ptr_C.copy_from_host(ptr_C_host.data());
ptr_D.reset(options.l);
ptr_D.copy_from_host(ptr_D_host.data());
initialize_block(block_A, seed + 2023);
initialize_block(block_B, seed + 2022);
initialize_block(block_C, seed + 2021);
}
/// Populates a Gemm::Arguments structure from the given commandline options
typename Gemm::Arguments args_from_options(const Options &options)
{
cutlass::KernelHardwareInfo hw_info;
// Change device_id to another value if you are running on a machine with multiple GPUs and wish
// to use a GPU other than that with device ID 0.
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kArray,
{{options.m, options.n, options.k, options.l}},
{ptr_A.get(), stride_A, ptr_B.get(), stride_B},
{{options.alpha, options.beta}, ptr_C.get(), stride_C, ptr_D.get(), stride_D},
hw_info
};
return arguments;
}
bool verify(const Options &options) {
bool passed = true;
for (int32_t i = 0; i < options.l; ++i) {
cutlass::TensorRef ref_A(block_A.get() + offset_A.at(i), Gemm::LayoutA::packed({options.m, options.k}));
cutlass::TensorRef ref_B(block_B.get() + offset_B.at(i), Gemm::LayoutB::packed({options.k, options.n}));
cutlass::TensorRef ref_C(block_C.get() + offset_C.at(i), Gemm::LayoutC::packed({options.m, options.n}));
cutlass::TensorRef ref_D(block_ref_D.get() + offset_D.at(i), Gemm::LayoutD::packed({options.m, options.n}));
//
// Compute reference output
//
// Create instantiation for device reference gemm kernel
DeviceGemmReference gemm_reference;
// Launch device reference gemm kernel
gemm_reference(
{options.m, options.n, options.k},
ElementAccumulator(options.alpha),
ref_A,
ref_B,
ElementAccumulator(options.beta),
ref_C,
ref_D);
// Wait for kernel to finish
CUDA_CHECK(cudaDeviceSynchronize());
// Check if output from CUTLASS kernel and reference kernel are equal or not
passed &= cutlass::reference::device::BlockCompareEqual(block_ref_D.get() + offset_D.at(i), block_D.get() + offset_D.at(i), options.m * options.n);
}
return passed;
}
/// Execute a given example GEMM computation
template <typename Gemm>
int run(Options &options)
{
allocate(options);
initialize(options);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm;
// Create a structure of gemm kernel arguments suitable for invoking an instance of Gemm
auto arguments = args_from_options(options);
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check if the problem size is supported or not
CUTLASS_CHECK(gemm.can_implement(arguments));
// Initialize CUTLASS kernel with arguments and workspace pointer
CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));
// Correctness / Warmup iteration
CUTLASS_CHECK(gemm.run());
// Check if output from CUTLASS kernel and reference kernel are equal or not
Result result;
result.passed = verify(options);
std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << std::endl;
if (!result.passed) {
exit(-1);
}
// Run profiling loop
if (options.iterations > 0)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));
CUTLASS_CHECK(gemm.run());
}
timer.stop();
// Compute average setup and runtime and GFLOPs.
float elapsed_ms = timer.elapsed_millis();
result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations);
result.gflops = options.gflops(result.avg_runtime_ms / 1000.0);
std::cout << " Problem Size: " << options.m << 'x' << options.n << 'x' << options.k << std::endl;
std::cout << " Batches : " << options.l << std::endl;
std::cout << " Alpha, Beta : " << options.alpha << ',' << options.beta << std::endl;
std::cout << " Avg runtime : " << result.avg_runtime_ms << " ms" << std::endl;
std::cout << " GFLOPS : " << result.gflops << std::endl;
}
return 0;
}
#endif // defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
// CUTLASS must be compiled with CUDA 12.3 Toolkit to run this example
if (__CUDACC_VER_MAJOR__ < 12 || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ < 3)) {
std::cerr << "This example requires CUDA 12.3 or newer.\n";
    // Returning zero so this test passes on older Toolkits; in that case the example is a no-op.
return 0;
}
cudaDeviceProp props;
int current_device_id;
  CUDA_CHECK(cudaGetDevice(&current_device_id));
CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id));
if (props.major < 9) {
std::cerr
<< "This example requires a GPU of NVIDIA's Hopper Architecture or "
<< "later (compute capability 90 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
//
// Evaluate CUTLASS kernels
//
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
run<Gemm>(options);
#endif
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/examples/56_hopper_ptr_array_batched_gemm/56_hopper_ptr_array_batched_gemm.cu/0
|
{
"file_path": "cutlass/examples/56_hopper_ptr_array_batched_gemm/56_hopper_ptr_array_batched_gemm.cu",
"repo_id": "cutlass",
"token_count": 7159
}
| 18 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/container/alignment.hpp>
#include <cute/tensor.hpp>
#include <cute/tensor_predicate.hpp>
#include <cute/atom/copy_atom.hpp>
namespace cute
{
//
// Accept mutable temporaries
//
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy(src, dst);
}
template <class VecType,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_vec(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy_vec<VecType>(src, dst);
}
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_aligned(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy_aligned(src, dst);
}
template <class PrdTensor,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(PrdTensor const& pred,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy_if(pred, src, dst);
}
template <class CopyPolicy,
class PrdTensor,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(CopyPolicy const& copy_policy,
PrdTensor const& pred,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy_if(copy_policy, pred, src, dst);
}
template <class CopyPolicy,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(CopyPolicy const& copy_policy,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy(copy_policy, src, dst);
}
//
// copy_if -- Predicated Copy
//
template <class PrdTensor,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(PrdTensor const& pred,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
auto copy_op = select_elementwise_copy(src, dst);
CUTE_UNROLL
for (int i = 0; i < size(src); ++i) {
if (pred(i)) {
copy_op.copy(src(i), dst(i));
}
}
}
//
// copy_if -- Predicated CopyAtom
//
namespace detail {
// Trait that detects if atom's traits has a member function with(bool)
template <class, class Enable = void>
constexpr bool has_with_bool = false;
template <class T>
constexpr bool has_with_bool<T, cute::void_t<decltype(declval<typename T::Traits>().with(declval<bool>()))>> = true;
} // end namespace detail
template <class... CopyArgs,
class PredTensor,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(Copy_Atom<CopyArgs...> const& copy_atom,
PredTensor const& pred, // (Rest...)
Tensor<SrcEngine, SrcLayout> const& src, // (V,Rest...)
Tensor<DstEngine, DstLayout> & dst) // (V,Rest...)
{
static_assert(SrcLayout::rank == DstLayout::rank, "CopyAtom rank-mismatch.");
if constexpr (SrcLayout::rank == 1) { // Dispatch the copy
copy_atom.call(src, dst);
} else { // Loop over all but the first mode
constexpr int R = SrcLayout::rank;
Tensor src_v = group_modes<1,R>(src);
Tensor dst_v = group_modes<1,R>(dst);
CUTE_UNROLL
for (int i = 0; i < size<1>(src_v); ++i) {
// If copy traits can be transformed with a predicate value, do it, otherwise branch here
if constexpr (detail::has_with_bool<Copy_Atom<CopyArgs...>>) {
copy_atom.with(pred(i)).call(src_v(_,i), dst_v(_,i));
} else {
if (pred(i)) {
copy_atom.call(src_v(_,i), dst_v(_,i));
}
}
}
}
}
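// Usage sketch (illustrative; names are hypothetical): for an atom-partitioned
// source/destination pair of shape (V,Rest...) and a predicate tensor indexed
// by the Rest... modes,
//
//   copy_if(copy_atom, pred, src, dst);
//
// issues one atom call per Rest... element. When the atom's traits provide
// with(bool), the predicate is folded into the instruction (see has_with_bool
// above); otherwise the call is simply skipped for false predicates.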
//
// copy_vec -- attempt vectorized copy with VecType
//
template <class VecType,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_vec(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
static_assert(sizeof_bits_v<VecType> >= 8 && sizeof_bits_v<VecType> % 8 == 0,
"Expected a vectorization type of at least a byte.");
using SrcType = typename SrcEngine::element_type;
using DstType = typename DstEngine::element_type;
if constexpr (sizeof_bits_v<SrcType> == sizeof_bits_v<DstType> &&
sizeof_bits_v<VecType> > sizeof_bits_v<DstType>)
{
// Preserve volatility of Src/Dst types.
using SrcVecType = conditional_t<is_volatile_v<SrcType>, VecType const volatile, VecType const>;
using DstVecType = conditional_t<is_volatile_v<DstType>, VecType volatile, VecType >;
Tensor src_v = recast<SrcVecType>(src);
Tensor dst_v = recast<DstVecType>(dst);
#if 0
if (thread0()) {
print("copy_vec<%db> -- vectorizing copy:\n", int(sizeof_bits_v<VecType>));
print(" "); print(src); print(" => "); print(src_v); print("\n");
print(" "); print(dst); print(" => "); print(dst_v); print("\n");
}
#endif
return copy_if(TrivialPredTensor{}, src_v, dst_v);
} else {
#if 0
if (thread0()) {
print("copy_vec<%db> -- NOT vectorizing copy:\n", int(sizeof_bits_v<VecType>));
print(" "); print(src); print("\n");
print(" "); print(dst); print("\n");
}
#endif
return copy_if(TrivialPredTensor{}, src, dst);
}
}
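// Example (illustrative): with SrcType == DstType == a 16-bit element type and
// VecType = uint128_t, both tensors are recast so that each new element spans
// eight old ones, and the copy proceeds with 16B accesses (assuming the layouts
// admit such a recast). If VecType is not wider than the element type, the
// function falls through to the plain element-wise copy_if above.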
//
// copy -- CopyAtom
//
template <class... CopyArgs,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Atom<CopyArgs...> const& copy_atom,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy_if(copy_atom, TrivialPredTensor{}, src, dst);
}
//////////////////////////////////////////
// Special Auto-Vectorizing Overloads
//////////////////////////////////////////
// Specialization for AutoVectorizingCopyAssumedAlignment<MaxVecBits>
template <int MaxVecBits, class... Args,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(AutoVectorizingCopyWithAssumedAlignment<MaxVecBits> const&,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
constexpr int vec_elem = decltype(max_common_vector(src, dst))::value;
constexpr int src_bits = sizeof_bits<typename SrcEngine::value_type>::value;
// When layouts are static, accept vec_bits up to 128
// When layouts are dynamic, accept vec_bits up to MaxVecBits
constexpr int vec_bits = (is_static<SrcLayout>::value && is_static<DstLayout>::value) ?
cute::min(vec_elem * src_bits, 128) :
cute::min(vec_elem * src_bits, MaxVecBits);
#if 0
if (thread0()) {
print("copy -- found max_common_vector of %d elems and vectorization to %d bits\n", vec_elem, vec_bits);
print(" "); print(src); print("\n");
print(" "); print(dst); print("\n");
}
#endif
if constexpr (vec_elem > 1 && vec_bits >= 8) {
return copy_vec<uint_bit_t<vec_bits>>(src, dst);
} else {
return copy_if(TrivialPredTensor{}, src, dst);
}
}
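// Worked example (illustrative): for fully static layouts of 16-bit elements
// with a max_common_vector of 8 elements, vec_elem = 8 and src_bits = 16, so
// vec_bits = min(8*16, 128) = 128 and the copy dispatches to
// copy_vec<uint_bit_t<128>>. With dynamic strides and MaxVecBits = 32, the same
// tensors would only be vectorized up to 32 bits, since alignment beyond that
// cannot be assumed.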
// Auto-vectorizing copy for static layouts
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy(AutoVectorizingCopy{}, src, dst);
}
// Auto-vectorizing copy with assumed alignment of dynamic layout strides up to 128bit.
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_aligned(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy(AutoVectorizingCopyWithAssumedAlignment<128>{}, src, dst);
}
// Specialization for Atom AutoVectorizingCopy
template <class... Args,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Atom<AutoVectorizingCopy, Args...> const&,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy(AutoVectorizingCopy{}, src, dst);
}
// Specialization for Atom AutoVectorizingCopyAssumedAlignment
template <int MaxVecBits, class... Args,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, Args...> const&,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy(AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>{}, src, dst);
}
#if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED)
template <class... CT_Args,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...> const& atom, // Copy_Traits may or may not have the memory barrier in it already
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
using SrcType = typename SrcEngine::value_type;
using DstType = typename DstEngine::value_type;
static_assert(sizeof_bits<SrcType>::value == sizeof_bits<DstType>::value);
static_assert((is_gmem<SrcEngine>::value && is_smem<DstEngine>::value) ||
(is_smem<SrcEngine>::value && is_gmem<DstEngine>::value),
"Bulk Copy only supports gmem -> smem or smem -> gmem movement.");
// G2S or S2G dispatch
using BULK_COPY_OP = conditional_t<is_gmem<SrcEngine>::value,
SM90_BULK_COPY_G2S,
SM90_BULK_COPY_S2G>;
// Find the common subtensor of src and dst
auto tiler = max_common_layout(src, dst);
constexpr int vec_elem = decltype(size(tiler))::value;
constexpr int vec_bits = vec_elem * sizeof_bits_v<SrcType>;
static_assert(vec_bits >= 128, "Expected at least 128-bits for BLKCP");
// Construct a new concrete Atom of the vector size
using BulkAtom = Copy_Atom<Copy_Traits<BULK_COPY_OP, Int<vec_bits>, CT_Args...>, SrcType>;
auto bulk_atom = apply(atom.opargs_, [](auto const&... args) { return BulkAtom{args...}; });
#if 0
if (thread0()) {
print("copy blkcp -- found a max_common_layout of "); print(tiler); print("\n");
print(" "); print(src); print("\n");
print(" "); print(dst); print("\n");
}
#endif
return copy(bulk_atom, logical_divide(src, tiler), logical_divide(dst, tiler));
}
// Backwards-compat. Throw out any extra Copy_Atom args.
template <class... CT_Args, class... CA_Args,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Atom<Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...>, CA_Args...> const& atom,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy(static_cast<Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...> const&>(atom), src, dst);
}
#endif // #if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED)
} // end namespace cute
|
cutlass/include/cute/algorithm/copy.hpp/0
|
{
"file_path": "cutlass/include/cute/algorithm/copy.hpp",
"repo_id": "cutlass",
"token_count": 5672
}
| 19 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <cuda.h>
#endif
#include <cute/atom/copy_traits_sm90_tma_swizzle.hpp>
#include <cute/atom/copy_traits.hpp>
#include <cute/atom/copy_atom.hpp>
#include <cute/algorithm/prefetch.hpp>
#include <cute/numeric/integral_ratio.hpp>
namespace cute
{
template <class GmemTmaBasisStrides_, class TmaGmemBasis_, class TmaSwizzle_>
struct AuxTmaParams {
using GmemStrides = GmemTmaBasisStrides_; // Strides for Gmem mode -> Tma coord mode, may be dynamic
GmemStrides g_stride_;
using TmaGmemBasis = TmaGmemBasis_; // Layout for Tma box shape -> Gmem mode(s), always static
static_assert(is_static<TmaGmemBasis>::value);
using TmaSwizzle = TmaSwizzle_; // Tma swizzle, always Swizzle<B,M,S>
static_assert(is_static<TmaSwizzle>::value);
};
// Utility for unpacking TMA_LOAD arguments into a CopyOp
template <class CopyOp>
struct TMA_LOAD_Unpack
{
template <class... Args,
class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits<CopyOp, Args...> const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
auto src_coord = src.data().coord_;
if constexpr (detail::is_prefetch<CopyOp>) {
return detail::explode_tuple(detail::CallCOPY<CopyOp>{},
traits.opargs_, tuple_seq<decltype(traits.opargs_)>{},
src_coord, tuple_seq<decltype(src_coord)>{});
} else {
static_assert(is_smem<TD>::value, "SM90_TMA_LOAD requires the destination be shared memory.");
void* dst_ptr = cute::raw_pointer_cast(dst.data());
#if 0
auto [c0,c1,c2,c3,c4] = append<5>(src_coord, 0);
printf("THR (%d,%d,%d) BLK (%d,%d,%d) TMACRD (%d,%d,%d,%d,%d) SMEMADDR (%p)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
int32_t(c0), int32_t(c1), int32_t(c2), int32_t(c3), int32_t(c4), dst_ptr);
#endif
return detail::explode_tuple(detail::CallCOPY<CopyOp>{},
traits.opargs_, tuple_seq<decltype(traits.opargs_)>{},
make_tuple(dst_ptr), seq<0>{},
src_coord, tuple_seq<decltype(src_coord)>{});
}
}
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// TMA_LOAD ///////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_LOAD_OP : SM90_TMA_LOAD {};
// The non-executable SM90_TMA_LOAD with tma_desc and no tma_mbar
// Use .with(tma_mbar) to construct an executable version
template <class NumBitsPerTMA, class AuxParams_>
struct Copy_Traits<SM90_TMA_LOAD, NumBitsPerTMA, AuxParams_>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD arguments
TmaDescriptor tma_desc_;
using AuxParams = AuxParams_;
AuxParams aux_params_;
// Return TmaDescriptor/TensorMap
CUTE_HOST_DEVICE constexpr
TmaDescriptor const*
get_tma_descriptor() const {
return &tma_desc_;
}
// Construct an executable SM90_TMA_LOAD with tma_mbar
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_TMA_LOAD_OP, NumBitsPerTMA>
with(uint64_t& tma_mbar, [[maybe_unused]] uint16_t const& multicast_mask = 0) const {
// We accept multicast_mask here to keep the API for both atoms consistent
return {{}, {&tma_desc_, &tma_mbar}};
}
// Construct an executable SM90_TMA_LOAD with tma_mbar (temp. overloaded for grouped gemm/ptr array gemm)
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_TMA_LOAD_OP, NumBitsPerTMA>
with(TmaDescriptor const* new_tma_desc, uint64_t& tma_mbar, [[maybe_unused]] uint16_t const& multicast_mask = 0) const {
// We accept multicast_mask here to keep the API for both atoms consistent
return {{}, {new_tma_desc, &tma_mbar}};
}
// Generate the TMA coord tensor
template <class GShape>
CUTE_HOST_DEVICE constexpr
auto
get_tma_tensor(GShape const& g_shape) const {
static_assert(is_congruent<decltype(g_shape), decltype(aux_params_.g_stride_)>::value);
return make_counting_tensor(make_layout(g_shape, aux_params_.g_stride_));
}
// Don't try to execute a copy with SM90_TMA_LOAD before calling .with()
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst) = delete;
};
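// Usage sketch (illustrative): these non-executable traits are what
// make_tma_copy(SM90_TMA_LOAD{}, ...) stores inside the returned TiledCopy.
// At kernel time the caller supplies the smem mbarrier to obtain an executable
// atom before issuing the copy, e.g.
//
//   copy(tma_load.with(tma_mbar), tAgA, tAsA);   // dispatches SM90_TMA_LOAD_OP
//
// The overload taking a TmaDescriptor* additionally swaps in a per-batch
// descriptor for grouped / ptr-array GEMMs.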
// The executable SM90_TMA_LOAD with tma_desc and tma_mbar
template <class NumBitsPerTMA>
struct Copy_Traits<SM90_TMA_LOAD_OP, NumBitsPerTMA>
: TMA_LOAD_Unpack<SM90_TMA_LOAD_OP>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD arguments
tuple<
TmaDescriptor const*,
uint64_t* // smem mbarrier
> const opargs_;
};
// The prefetch for SM90_TMA_LOAD with tma_desc
template <class NumBitsPerTMA, class... Args>
struct Copy_Traits<SM90_TMA_LOAD::PREFETCH, NumBitsPerTMA, Args...>
: TMA_LOAD_Unpack<SM90_TMA_LOAD::PREFETCH>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD::PREFETCH arguments
tuple<TmaDescriptor const*> const opargs_;
// Construct with any other Traits' TMA Desc
template <class... CopyArgs>
CUTE_HOST_DEVICE
Copy_Traits(Copy_Traits<CopyArgs...> const& traits)
: opargs_({&traits.tma_desc_}) {}
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// TMA_LOAD_MULTICAST /////////////////////////////
//////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_LOAD_MULTICAST_OP : SM90_TMA_LOAD_MULTICAST {};
// The non-executable SM90_TMA_LOAD_MULTICAST with tma_desc and no tma_mbar
// Use .with(tma_mbar, multicast_mask) to construct an executable version
template <class NumBitsPerTMA, class AuxParams_>
struct Copy_Traits<SM90_TMA_LOAD_MULTICAST, NumBitsPerTMA, AuxParams_>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD_MULTICAST arguments
TmaDescriptor tma_desc_;
using AuxParams = AuxParams_;
AuxParams aux_params_;
// Return TmaDescriptor/TensorMap
CUTE_HOST_DEVICE constexpr
TmaDescriptor const*
get_tma_descriptor() const {
return &tma_desc_;
}
// Construct an executable SM90_TMA_LOAD_MULTICAST with tma_mbar
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_TMA_LOAD_MULTICAST_OP, NumBitsPerTMA>
with(uint64_t& tma_load_mbar, uint16_t const& multicast_mask) const {
return {{}, {&tma_desc_, &tma_load_mbar, multicast_mask}};
}
// Construct an executable SM90_TMA_LOAD_MULTICAST_OP with tma_mbar (temp. overloaded for grouped gemm/ptr array gemm)
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_TMA_LOAD_MULTICAST_OP, NumBitsPerTMA>
with(TmaDescriptor const* new_tma_desc, uint64_t& tma_load_mbar, uint16_t const& multicast_mask) const {
return {{}, {new_tma_desc, &tma_load_mbar, multicast_mask}};
}
// Generate the TMA coord tensor
template <class GShape>
CUTE_HOST_DEVICE constexpr
auto
get_tma_tensor(GShape const& g_shape) const {
static_assert(is_congruent<decltype(g_shape), decltype(aux_params_.g_stride_)>::value);
return make_counting_tensor(make_layout(g_shape, aux_params_.g_stride_));
}
// Don't try to execute a copy with SM90_TMA_LOAD_MULTICAST before calling .with()
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst) = delete;
};
// The executable SM90_TMA_LOAD_MULTICAST with tma_desc and tma_mbar and multicast_mask
template <class NumBitsPerTMA>
struct Copy_Traits<SM90_TMA_LOAD_MULTICAST_OP, NumBitsPerTMA>
: TMA_LOAD_Unpack<SM90_TMA_LOAD_MULTICAST_OP>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD_MULTICAST arguments
tuple<
TmaDescriptor const*,
uint64_t*, // smem mbarrier
uint16_t // multicast mask
> const opargs_;
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// TMA_STORE //////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// The executable SM90_TMA_STORE with tma_desc
template <class NumBitsPerTMA, class AuxParams_>
struct Copy_Traits<SM90_TMA_STORE, NumBitsPerTMA, AuxParams_>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_STORE arguments
TmaDescriptor tma_desc_;
using AuxParams = AuxParams_;
AuxParams aux_params_;
// Return TmaDescriptor/TensorMap
CUTE_HOST_DEVICE constexpr
TmaDescriptor const*
get_tma_descriptor() const {
return &tma_desc_;
}
// Generate the TMA coord tensor
template <class GShape>
CUTE_HOST_DEVICE constexpr
auto
get_tma_tensor(GShape const& g_shape) const {
static_assert(is_congruent<decltype(g_shape), decltype(aux_params_.g_stride_)>::value);
return make_counting_tensor(make_layout(g_shape, aux_params_.g_stride_));
}
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_smem<TS>::value, "Expected smem src for SM90_TMA_STORE");
//static_assert(is_gmem<TD>::value, "Expected gmem dst for SM90_TMA_STORE"); // TMA spoofed src tensor
void const* const desc_ptr = &(traits.tma_desc_);
void const* const src_ptr = cute::raw_pointer_cast(src.data());
auto dst_coord = dst.data().coord_;
#if 0
auto [c0,c1,c2,c3,c4] = append<5>(dst_coord, 0);
printf("THR (%d,%d,%d) BLK (%d,%d,%d) TMACRD (%d,%d,%d,%d,%d) SMEMADDR (%p)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
int32_t(c0), int32_t(c1), int32_t(c2), int32_t(c3), int32_t(c4), src_ptr);
#endif
return detail::explode_tuple(detail::CallCOPY<SM90_TMA_STORE>{},
make_tuple(desc_ptr, src_ptr), seq<0,1>{},
dst_coord, tuple_seq<decltype(dst_coord)>{});
}
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// TMA_REDUCE_ADD //////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// The executable SM90_TMA_REDUCE_ADD with tma_desc
template <class NumBitsPerTMA, class AuxParams_>
struct Copy_Traits<SM90_TMA_REDUCE_ADD, NumBitsPerTMA, AuxParams_>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_REDUCE_ADD arguments
TmaDescriptor tma_desc_;
using AuxParams = AuxParams_;
AuxParams aux_params_;
// Return TmaDescriptor/TensorMap
CUTE_HOST_DEVICE constexpr
TmaDescriptor const*
get_tma_descriptor() const {
return &tma_desc_;
}
// Generate the TMA coord tensor
template <class GShape>
CUTE_HOST_DEVICE constexpr
auto
get_tma_tensor(GShape const& g_shape) const {
static_assert(is_congruent<decltype(g_shape), decltype(aux_params_.g_stride_)>::value);
return make_counting_tensor(make_layout(g_shape, aux_params_.g_stride_));
}
template <class Coord, int... Is>
CUTE_HOST_DEVICE constexpr
void
copy_unpack_(void const* const src_ptr,
Coord const& dst_coord, seq<Is...>) const
{
#if 0
auto [c0,c1,c2,c3,c4] = append<5>(dst_coord, 0);
printf("THR (%d,%d,%d) BLK (%d,%d,%d) TMACRD (%d,%d,%d,%d,%d) SMEMADDR (%p)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
int32_t(c0), int32_t(c1), int32_t(c2), int32_t(c3), int32_t(c4), src_ptr);
#endif
SM90_TMA_REDUCE_ADD::copy(&tma_desc_,
src_ptr, get<Is>(dst_coord)...);
}
// This is the copy_unpack dispatch for this Copy_Traits
// Src needs to be a smem tensor
// Dst needs to be a gmem tensor with TmaCoordIterator .data()
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_smem<TS>::value, "Expected smem src for SM90_TMA_REDUCE_ADD");
//static_assert(is_gmem<TD>::value, "Expected gmem dst for SM90_TMA_REDUCE_ADD"); // TMA spoofed src tensor
traits.copy_unpack_(cute::raw_pointer_cast(src.data()), dst.data().coord_, tuple_seq<decltype(dst.data().coord_)>{});
}
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// BULK COPY //////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
template <class NumBitsPerTMA, class... OpArgs>
struct Copy_Traits<SM90_BULK_COPY_G2S, NumBitsPerTMA, OpArgs...>
{
  static_assert(int32_t(NumBitsPerTMA::value / 8) % 16 == 0,
                "Bulk Copy requires the copy vector size to be a multiple of 16B.");
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_BULK_COPY_G2S arguments
// 0: uint64_t* bulk_load_memory_barrier
cute::tuple<OpArgs...> bulk_load_mbar_;
// Record the memory barrier for the instruction
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_BULK_COPY_G2S, NumBitsPerTMA, uint64_t*>
with(uint64_t& bulk_mbar) const {
return {{&bulk_mbar}};
}
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_same<cute::tuple<OpArgs...>, cute::tuple<uint64_t*>>::value,
"Extra arguments not set. Set .with() before use.");
static_assert(is_gmem<TS>::value, "Expected gmem src for SM90_BULK_COPY_G2S");
static_assert(is_smem<TD>::value, "Expected smem dst for SM90_BULK_COPY_G2S");
SM90_BULK_COPY_G2S::copy(raw_pointer_cast(src.data()), get<0>(traits.bulk_load_mbar_),
raw_pointer_cast(dst.data()), int32_t(NumBitsPerTMA::value / 8));
}
};
template <class NumBitsPerTMA, class... Args>
struct Copy_Traits<SM90_BULK_COPY_G2S::PREFETCH, NumBitsPerTMA, Args...>
: Copy_Traits<SM90_BULK_COPY_G2S, NumBitsPerTMA>
{
template <class... CopyArgs>
CUTE_HOST_DEVICE
Copy_Traits(Copy_Traits<CopyArgs...> const& traits) {}
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_gmem<TS>::value, "Expected gmem src for SM90_BULK_PREFETCH");
SM90_BULK_COPY_G2S::PREFETCH::copy(raw_pointer_cast(src.data()), int32_t(NumBitsPerTMA::value / 8));
}
};
template <class NumBitsPerTMA>
struct Copy_Traits<SM90_BULK_COPY_S2G, NumBitsPerTMA>
{
  static_assert(int32_t(NumBitsPerTMA::value / 8) % 16 == 0,
                "Bulk Copy requires the copy vector size to be a multiple of 16B.");
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_smem<TS>::value, "Expected smem src for SM90_BULK_COPY_S2G");
static_assert(is_gmem<TD>::value, "Expected gmem dst for SM90_BULK_COPY_S2G");
SM90_BULK_COPY_S2G::copy(raw_pointer_cast(src.data()), raw_pointer_cast(dst.data()), int32_t(NumBitsPerTMA::value / 8));
}
};
//
// Placeholder for the bulk copy algorithm's default, auto-vectorizing behavior
//
template <class... OpArgs>
struct Copy_Traits<SM90_BULK_COPY_AUTO, OpArgs...>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,_1>, Stride<_0,_0>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,_1>, Stride<_0,_0>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_UBULK_COPY arguments
// 0: uint64_t* bulk_load_memory_barrier [if this is a BULK_LOAD_G2S]
cute::tuple<OpArgs...> opargs_;
// Record the memory barrier for the instruction
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_BULK_COPY_AUTO, uint64_t*>
with(uint64_t& bulk_mbar) const {
return {{&bulk_mbar}};
}
};
//
// MAKE_TMA_COPY and related
//
namespace detail {
// Custom version of coalesce that greedily combines modes only up to size-256
// Look at each element and the back of the stack (in order of priority)
// back(NewLayout) get<I>(OldLayout)
// s0:d0 _1:d1 => continue
// _1:d0 s1:d1 => replace_back s1:d1
// s0:d0 s1:s0*d0 => replace_back s0*s1:d0 if s0*s1 <= 256
// s0:d0 s1:d1 => append s1:d1
//
// @pre OldShape and OldStride are flat
template <int I, class OldShape, class OldStride, class NewShape, class NewStride>
CUTE_HOST_DEVICE constexpr
auto
coalesce_256_impl(OldShape const& old_shape, OldStride const& old_stride,
NewShape const& new_shape, NewStride const& new_stride)
{
if constexpr (I == rank_v<OldShape>) {
// Base case, we're done
if constexpr (is_constant<1, NewShape>::value) {
return Layout<_1,_0>{};
} else {
return Layout<NewShape,NewStride>{new_shape,new_stride};
}
} else if constexpr (is_constant<1, decltype(get<I>(old_shape))>::value) {
// shape<I>(layout) == _1, skip it and continue
return coalesce_256_impl<I+1>(old_shape, old_stride, new_shape, new_stride);
} else if constexpr (is_constant<1, NewShape>::value) {
// Replace our shape-1 with anything (Can only happen on input new_shape/new_stride)
return coalesce_256_impl<I+1>(old_shape, old_stride, get<I>(old_shape), get<I>(old_stride));
} else if constexpr (is_constant<true, decltype(back(new_shape) * back(new_stride) == get<I>(old_stride) &&
get<I>(old_shape) * back(new_shape) <= Int<256>{})>::value) {
// Merge modes because the shapes and strides match and the merge is 256 or less
return coalesce_256_impl<I+1>(old_shape, old_stride,
replace_back(new_shape, get<I>(old_shape) * back(new_shape)),
new_stride);
} else {
// Can't replace or merge, so append a new mode
return coalesce_256_impl<I+1>(old_shape, old_stride,
append(new_shape, get<I>(old_shape)),
append(new_stride, get<I>(old_stride)));
}
CUTE_GCC_UNREACHABLE;
}
// Combine all the modes that are possible to combine
// Does not respect the profile of the layout, but does preserve total size
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
coalesce_256(Layout<Shape,Stride> const& layout)
{
auto flat_shape = flatten(layout.shape());
auto flat_stride = flatten(layout.stride());
return coalesce_256_impl<1>(flat_shape, flat_stride, get<0>(flat_shape), get<0>(flat_stride));
}
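// Worked example (illustrative):
//   coalesce_256(Layout<Shape<_128,_2,_8>, Stride<_1,_128,_256>>{})
// merges the first two modes (stride 128 == 128*1 and 128*2 = 256 <= 256) but
// cannot absorb the third (256*8 > 256), yielding
//   Layout<Shape<_256,_8>, Stride<_1,_256>>{}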
template <class TmaInternalType,
class GEngine, class GLayout,
class SShape, class SStride,
class VShape, class VStride>
CUTE_HOST_DEVICE constexpr
auto
construct_tma_gbasis(Tensor<GEngine,GLayout> const& gtensor, // The original GMEM Tensor
Layout<SShape,SStride> const& slayout, // The layout of SMEM
Layout<VShape,VStride> const& cta_v_map) // smem_idx to hier gmode
{
//
// TMA parameter checking
//
CUTE_STATIC_ASSERT_V(product_each(shape(slayout)) == product_each(shape(cta_v_map)),
"TMA requires CTA_Tile and SLayout top-level shape equivalence.");
#if 0
print("gtensor : "); print(gtensor); print("\n");
print("slayout : "); print(slayout); print("\n");
print("cta_v_map : "); print(cta_v_map); print("\n");
#endif
//
// TMA slayout manipulation
//
// Invert the smem to get the largest contiguous vector in the smem layout
// smem idx -> smem coord
auto inv_smem_layout = right_inverse(get_nonswizzle_portion(slayout));
// Compose with the V-Map to convert smem coord (CTA val idx) to gmem mode
// smem idx -> gmem mode
auto sidx2gmode_full = coalesce(composition(cta_v_map, inv_smem_layout));
#if 0
print("inv_smem_layout : "); print(inv_smem_layout); print("\n");
print("sidx2gmode_full : "); print(sidx2gmode_full); print("\n");
#endif
//
// TMA gtensor truncation
//
// Truncate any incompatibilities -- no starting in the middle of gmodes
auto smem_rank = find_if(stride(sidx2gmode_full), [](auto e) {
[[maybe_unused]] auto v = basis_value(e);
return not is_constant<1,decltype(v)>{};
});
static_assert(smem_rank > 0, "Could not find a common tile-gmem vectorization. Does the Tile select out major GMEM modes?");
// Keep only the static-1 basis modes into gmem
auto sidx2gmode = take<0,smem_rank>(sidx2gmode_full);
#if 0
print("smem_rank : "); print(smem_rank); print("\n");
print("sidx2gmode : "); print(sidx2gmode); print("\n");
#endif
//
// TMA gtensor manipulation
//
// The smem vector is the same units as gtensor, so compose first and then recast
// tma_val_idx:gmem_strides
auto tile_gstride = recast<TmaInternalType>(gtensor.compose(sidx2gmode)).layout();
// Coalesce modes up to size-256 (the maximum TMA box extent in units of TmaInternalType)
// tma_box_shape:gmem_strides
auto tma_gstride = coalesce_256(tile_gstride);
// Perform the tiling, recast, and coalesce to the gmem vector again, but with indirections to the gtensor modes
auto gbasis = make_identity_layout(shape(gtensor));
auto tile_gbasis_tmp = gbasis.compose(sidx2gmode);
// Instead of the recast (gbasis doesn't have type info), replace the shape with the already-recasted shape
// tma_box_shape:gmem_mode
auto tile_gbasis = make_layout(shape(tile_gstride), stride(tile_gbasis_tmp));
// "Coalesce" the tile basis into a compatible shape with the tma_gstride
auto tma_gbasis_tile = tile_gbasis.compose(make_layout(wrap(shape(tma_gstride))));
// Recast the original tensor for shape/stride inspections
Tensor gtensor_T = recast<TmaInternalType>(gtensor);
// Find missing bases that don't appear in tile_gbasis
auto tile_gbasis_remaining_stride = filter_tuple(flatten(shape (gtensor_T)), flatten(stride(gtensor_T)),
flatten(stride(gbasis)),
[&](auto s, auto d, auto e)
{
if constexpr (is_constant<1, decltype(s)>::value || is_constant<0, decltype(d)>::value) {
return cute::tuple<>{}; // If size-1 or stride-0, then don't append
} else {
using E = decltype(e);
auto has_e = any_of(flatten(stride(tma_gbasis_tile)), [] (auto tb) { return tb == E{}; });
if constexpr (decltype(has_e)::value) {
return cute::tuple<>{}; // If d was found, then don't append
} else {
return cute::tuple<E>(e); // Else, this is missing so append
}
}
});
// Append the remaining basis modes that contribute to the TMA with size-1
auto tile_gbasis_remaining_shape = repeat<rank(tile_gbasis_remaining_stride)>(Int<1>{});
auto tma_gbasis_full = make_layout(tuple_cat(wrap( shape(tma_gbasis_tile)), wrap(tile_gbasis_remaining_shape )),
tuple_cat(wrap(stride(tma_gbasis_tile)), wrap(tile_gbasis_remaining_stride)));
// Group the trailing modes to make this max rank-5 -- TMA rank limitation
// tma_box_shape:gmem_mode
auto tma_gbasis = group<cute::min(rank(tma_gbasis_full),4),-1>(tma_gbasis_full);
#if 0
print("tile_gstride : "); print(tile_gstride); print("\n");
print("tma_gstride : "); print(tma_gstride); print("\n");
print("gbasis : "); print(gbasis); print("\n");
print("tile_gbasis : "); print(tma_gbasis_tile); print("\n");
print("tma_gbasis : "); print(tma_gbasis); print("\n");
#endif
return tma_gbasis;
}
template <class GEngine, class GLayout,
class TmaGmemBasisStride,
class ShapeT, size_t TmaRank>
CUTE_HOST_DEVICE constexpr
void
fill_tma_gmem_shape_stride(Tensor<GEngine,GLayout> const& gtensor, // Gmem Shapes and Strides, in units of TmaInternalType
TmaGmemBasisStride const& tma_gbasis_stride, // Map Tma mode idx -> Gmem mode(s)
                           cute::array<ShapeT, TmaRank> & gmem_prob_shape, // Tma Shapes, uint32_t or uint64_t
cute::array<uint64_t, TmaRank> & gmem_prob_stride) // Tma Strides
{
static_assert(is_tuple<TmaGmemBasisStride>::value);
static_assert(is_same<uint32_t, ShapeT>::value || is_same<uint64_t, ShapeT>::value);
using TmaInternalType = typename GEngine::value_type;
constexpr int tma_rank = decltype(rank(tma_gbasis_stride))::value;
static_assert(TmaRank >= tma_rank);
auto gmem_shape = shape(gtensor);
auto gmem_stride = stride(gtensor);
// Use the indirections in tma_gbasis_stride into gtensor to construct the tma gmem shapes/strides
for_each(make_seq<tma_rank>{}, [&](auto i) {
constexpr int tma_i_rank = decltype(rank<i>(tma_gbasis_stride))::value;
if constexpr (tma_i_rank == 1) {
// Trivial contribution of this gmem mode to this tma mode
auto ej = unwrap(get<i>(tma_gbasis_stride));
gmem_prob_shape[i] = basis_get(ej, gmem_shape);
gmem_prob_stride[i] = basis_get(ej, gmem_stride);
} else {
// Apply a recurrence to each gmem mode that contributes to this tma mode
for_each(get<i>(tma_gbasis_stride), [&](auto ej) {
// Problem shape
uint64_t shape_j = basis_get(ej, gmem_shape);
// Problem stride (in bytes)
uint64_t stride_j = basis_get(ej, gmem_stride);
uint64_t old_stride = gmem_prob_stride[i];
gmem_prob_stride[i] = gcd(gmem_prob_stride[i], stride_j);
if (gmem_prob_stride[i] != 0) {
        // Recurrence (closed form over the contributing gmem modes j): g_shape = 1 + sum_j (s_j - 1) * (d_j / gcd_k d_k)
gmem_prob_shape[i] = (gmem_prob_shape[i]-1) * (old_stride / gmem_prob_stride[i])
+ (shape_j-1) * (stride_j / gmem_prob_stride[i])
+ 1;
} else {
gmem_prob_shape[i] = shape_j;
}
});
}
});
}
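// Worked example (illustrative): if one TMA mode aggregates two gmem modes with
// (shape,stride) = (8,16) and (4,2), in units of TmaInternalType, the loop above
// computes
//   after (8,16): gcd = 16, shape = (8-1)*(16/16) + 1              = 8
//   after (4, 2): gcd =  2, shape = (8-1)*(16/2) + (4-1)*(2/2) + 1 = 60
// i.e. a single TMA mode of extent 60 and stride 2 whose index range covers
// every element addressed by the two original modes.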
// Overload for an existing Copy_Traits
template <class GEngine, class GLayout,
class Op, class Bits, class Aux,
class ShapeT, size_t TmaRank>
CUTE_HOST_DEVICE constexpr
void
fill_tma_gmem_shape_stride(Copy_Traits<Op,Bits,Aux> const& tma_traits,
Tensor<GEngine,GLayout> const& gtensor, // Gmem Shapes and Strides, value_type = TmaInternalType
                           cute::array<ShapeT, TmaRank> & gmem_prob_shape, // Tma Shapes, uint32_t or uint64_t
cute::array<uint64_t, TmaRank> & gmem_prob_stride) // Tma Strides
{
return fill_tma_gmem_shape_stride(gtensor, stride(typename Aux::TmaGmemBasis{}),
gmem_prob_shape, gmem_prob_stride);
}
// Use a sidx2gmode to read through the GMEM tensor
// and construct a TMA Descriptor for the resulting instruction
// At the same time, construct the Tma Tensor's Stride to generate
// the TMA coordinates that the instruction consumes.
//
template <class TmaInternalType,
class GEngine, class GLayout,
class TShape, class TStride,
int B, int M, int S>
CUTE_HOST_RTC
auto
make_tma_copy_desc(Tensor<GEngine,GLayout> const& gtensor, // The original GMEM Tensor
Layout<TShape,TStride> const& tma_gbasis, // TMA mode -> GMEM mode mapping
Swizzle<B,M,S> const& swizzle, // Swizzle fn on smem_idx
uint32_t num_multicast) // The number of CTAs in multicasting
{
//
// TMA desc creation
//
constexpr int tma_dim = decltype(rank(tma_gbasis))::value;
//
// TMA gmem desc info
//
// Recast the original tensor for shape/stride inspections
Tensor gtensor_T = recast<TmaInternalType>(gtensor);
void* gmem_address = (void*) raw_pointer_cast(gtensor_T.data());
auto gmem_layout = gtensor_T.layout();
cute::array<uint64_t, 5> gmem_prob_shape = {1,1,1,1,1};
cute::array<uint64_t, 5> gmem_prob_stride = {0,0,0,0,0};
fill_tma_gmem_shape_stride(gtensor_T, stride(tma_gbasis), gmem_prob_shape, gmem_prob_stride);
assert((reinterpret_cast<uint64_t>(gmem_address) & 0b1111) == 0); // Address must be 16B-aligned
assert(gmem_prob_shape[0] >= (uint64_t(1))); // Size must be min 1
assert(gmem_prob_shape[0] <= (uint64_t(1) << 32)); // Size must be max 2^32
assert(gmem_prob_shape[1] >= (uint64_t(1))); // Size must be min 1
assert(gmem_prob_shape[1] <= (uint64_t(1) << 32)); // Size must be max 2^32
assert(gmem_prob_shape[2] >= (uint64_t(1))); // Size must be min 1
assert(gmem_prob_shape[2] <= (uint64_t(1) << 32)); // Size must be max 2^32
assert(gmem_prob_shape[3] >= (uint64_t(1))); // Size must be min 1
assert(gmem_prob_shape[3] <= (uint64_t(1) << 32)); // Size must be max 2^32
assert(gmem_prob_shape[4] >= (uint64_t(1))); // Size must be min 1
assert(gmem_prob_shape[4] <= (uint64_t(1) << 32)); // Size must be max 2^32
// TMA descriptor does not store the zeroth stride and assumes it is 1 (TmaInternalType element).
assert(gmem_prob_stride[0] == 1 && "Majorness of smem doesn't match majorness of gmem");
// convert strides to byte strides
for(uint64_t& stride : gmem_prob_stride) {
stride = (stride * sizeof_bits_v<TmaInternalType>) / 8;
}
// Assert the byte strides. Tma Descriptor uses byte strides
assert((gmem_prob_stride[1]) < (uint64_t(1) << 40)); // Stride must be max 2^40
assert((gmem_prob_stride[1] & 0b1111) == 0); // Stride must be multiple of 16B (128b)
assert((gmem_prob_stride[2]) < (uint64_t(1) << 40)); // Stride must be max 2^40
assert((gmem_prob_stride[2] & 0b1111) == 0); // Stride must be multiple of 16B (128b)
assert((gmem_prob_stride[3]) < (uint64_t(1) << 40)); // Stride must be max 2^40
assert((gmem_prob_stride[3] & 0b1111) == 0); // Stride must be multiple of 16B (128b)
assert((gmem_prob_stride[4]) < (uint64_t(1) << 40)); // Stride must be max 2^40
assert((gmem_prob_stride[4] & 0b1111) == 0); // Stride must be multiple of 16B (128b)
//
// TMA smem desc info
//
cute::array<uint32_t, 5> smem_box_shape = {1,1,1,1,1};
cute::array<uint32_t, 5> smem_box_stride = {1,1,1,1,1};
// The smem box is simply given by the sizes of the modes in tma_gbasis
for_each(make_seq<tma_dim>{}, [&](auto i) {
smem_box_shape[i] *= size<i>(tma_gbasis);
});
// Finally, truncate the tma box by the num_multicast
for (uint32_t i = tma_dim-1, multicast = num_multicast; multicast > 1; --i) {
assert(smem_box_shape[i] % multicast == 0 || multicast % smem_box_shape[i] == 0);
uint32_t new_mult = ceil_div(multicast, smem_box_shape[i]);
smem_box_shape[i] = ceil_div(smem_box_shape[i], multicast);
multicast = new_mult;
}
assert(smem_box_shape[0] >= (uint32_t(1))); // Size must be min 1
assert(smem_box_shape[0] <= (uint32_t(1) << 8)); // Size must be max 2^8 = 256
assert(smem_box_shape[1] >= (uint32_t(1))); // Size must be min 1
assert(smem_box_shape[1] <= (uint32_t(1) << 8)); // Size must be max 2^8 = 256
assert(smem_box_shape[2] >= (uint32_t(1))); // Size must be min 1
assert(smem_box_shape[2] <= (uint32_t(1) << 8)); // Size must be max 2^8 = 256
assert(smem_box_shape[3] >= (uint32_t(1))); // Size must be min 1
assert(smem_box_shape[3] <= (uint32_t(1) << 8)); // Size must be max 2^8 = 256
assert(smem_box_shape[4] >= (uint32_t(1))); // Size must be min 1
assert(smem_box_shape[4] <= (uint32_t(1) << 8)); // Size must be max 2^8 = 256
assert(smem_box_stride[0] >= (uint32_t(1))); // Stride must be min 1
assert(smem_box_stride[0] <= (uint32_t(8))); // Stride must be max 2^3 = 8
assert(smem_box_stride[1] >= (uint32_t(1))); // Stride must be min 1
assert(smem_box_stride[1] <= (uint32_t(8))); // Stride must be max 2^3 = 8
assert(smem_box_stride[2] >= (uint32_t(1))); // Stride must be min 1
assert(smem_box_stride[2] <= (uint32_t(8))); // Stride must be max 2^3 = 8
assert(smem_box_stride[3] >= (uint32_t(1))); // Stride must be min 1
assert(smem_box_stride[3] <= (uint32_t(8))); // Stride must be max 2^3 = 8
assert(smem_box_stride[4] >= (uint32_t(1))); // Stride must be min 1
assert(smem_box_stride[4] <= (uint32_t(8))); // Stride must be max 2^3 = 8
//
// Construct the descriptor
//
TmaDescriptor tma_desc{};
//
// TMA general info
//
#if (__CUDACC_VER_MAJOR__ >= 12) && !defined(__CUDACC_RTC__)
CUtensorMapDataType tma_format = TMA::to_CUtensorMapDataType<TmaInternalType>();
CUtensorMapInterleave tma_interleave = CU_TENSOR_MAP_INTERLEAVE_NONE;
CUtensorMapL2promotion tma_l2Promotion = CU_TENSOR_MAP_L2_PROMOTION_L2_128B;
CUtensorMapFloatOOBfill tma_oobFill = CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE;
// TMA smem swizzle type
CUtensorMapSwizzle smem_swizzle = TMA::to_CUtensorMapSwizzle(get_tma_swizzle_bits(swizzle));
CUresult result = cuTensorMapEncodeTiled(
&tma_desc,
tma_format,
tma_dim,
gmem_address,
gmem_prob_shape.data(),
gmem_prob_stride.data() + 1, // gmem_prob_stride[0] implicitly 1
smem_box_shape.data(),
smem_box_stride.data(),
tma_interleave,
smem_swizzle,
tma_l2Promotion,
tma_oobFill);
if (result != CUDA_SUCCESS) {
std::cerr << "TMA Desc Addr: " << &tma_desc
<< "\nformat " << tma_format
<< "\ndim " << tma_dim
<< "\ngmem_address " << gmem_address
<< "\nglobalDim " << gmem_prob_shape
<< "\nglobalStrides " << gmem_prob_stride
<< "\nboxDim " << smem_box_shape
<< "\nelementStrides " << smem_box_stride
<< "\ninterleave " << tma_interleave
<< "\nswizzle " << smem_swizzle
<< "\nl2Promotion " << tma_l2Promotion
<< "\noobFill " << tma_oobFill << std::endl;
std::cerr << "Error: Failed to initialize the TMA descriptor " << result << std::endl;
assert(false);
}
#endif // (__CUDACC_VER_MAJOR__ >= 12) && !defined(__CUDACC_RTC__)
auto recast_ratio = cute::trait_ratio(sizeof_bits<typename GEngine::value_type>{},
sizeof_bits< TmaInternalType>{});
auto gbasis = make_basis_like(shape(gtensor));
// Finally, get the inverse permutation of the E<i> bases for the mocked gmem stride
auto gmem_tma_basis_stride = transform_leaf(gbasis, [&](auto ei) {
auto si = basis_get(ei, shape(gmem_layout));
auto di = basis_get(ei, stride(gmem_layout));
if constexpr (is_constant<1, decltype(si)>::value || is_constant<0, decltype(di)>::value) {
return Int<0>{}; // If size-1 or stride-0, return arithmetic identity -- no contribution to the TMA
} else {
auto tma_gmem_basis_stride = stride(tma_gbasis);
// Find j such that E<i> is in stride<j>(tma_gbasis)
using EI = decltype(ei);
[[maybe_unused]] auto j = find_if(tma_gmem_basis_stride, [&](auto tma_stride_j) { return any_of(tma_stride_j, [&](auto dj) { return dj == EI{}; }); });
if constexpr (decltype(j == rank(tma_gmem_basis_stride))::value) {
return Int<0>{}; // If not-found, return arithmetic identity -- no contribution to the TMA
} else
if constexpr (decltype(j == Int<0>{})::value) {
auto scale = recast_ratio * basis_get(ei, stride(gtensor));
return E<j>{} * scale; // Return TMA Coord basis -- with a recast scale factor
} else
if constexpr (decltype(rank<j>(tma_gmem_basis_stride) == Int<1>{})::value) {
return E<j>{}; // Return TMA Coord basis -- known scale of Int<1>{}
} else {
int32_t scale = ceil_div(int32_t(di * sizeof_bits_v<TmaInternalType> / cute::max(gmem_prob_stride[j], uint64_t{16})), 8);
return E<j>{} * scale; // Return TMA Coord basis -- with a dynamic scale factor
}
}
});
#if 0
print("gmem_tma_basis_stride : "); print(gmem_tma_basis_stride); print("\n");
#endif
using AuxParams = AuxTmaParams<decltype(gmem_tma_basis_stride),
decltype(tma_gbasis),
decltype(swizzle)>;
return cute::make_tuple(tma_desc, AuxParams{gmem_tma_basis_stride});
}
template <class TmaInternalType,
class CopyOp,
class GEngine, class GLayout,
class SLayout,
class VShape, class VStride>
CUTE_HOST_RTC
auto
make_tma_copy_atom(CopyOp,
Tensor<GEngine,GLayout> const& gtensor, // Full GMEM Tensor
SLayout const& slayout, // CTA Tile of SMEM, potentially swizzled
uint32_t const& num_multicast, // The number of CTAs involved in multicasting
Layout<VShape,VStride> const& cta_v_map) // V: CTA val idx -> gmem mode
{
//
// TMA truncated layout
//
auto smem_swizzle = get_swizzle_portion(slayout);
auto smem_layout = get_nonswizzle_portion(slayout);
auto tma_gbasis = detail::construct_tma_gbasis<TmaInternalType>(gtensor, smem_layout, cta_v_map);
//
// Construct the TMA Desc and the strides of the TMA Tensor
//
auto [tma_desc, aux_params] = detail::make_tma_copy_desc<TmaInternalType>(gtensor,
tma_gbasis,
smem_swizzle,
num_multicast);
//
// Construct the Copy_Traits
//
constexpr int num_bits_per_tma = size(tma_gbasis) * sizeof_bits_v<TmaInternalType>;
using Traits = Copy_Traits<CopyOp, cute::C<num_bits_per_tma>, decltype(aux_params)>;
using Atom = Copy_Atom<Traits, typename GEngine::value_type>;
Traits tma_traits{tma_desc, aux_params};
#if 0
print("num_bits_per_tma : "); print(num_bits_per_tma); print("\n");
print("g_stride_bases : "); print(tma_traits.aux_params_.g_stride_); print("\n");
#endif
// Return the Copy_Atom
return Atom{tma_traits};
}
// The "logical TMA tid" is a map from the CTA rank to its logical id
// within the instruction. It works like a mask or ordering on the
// CTAs. For non-multicast TMA, all CTAs should map to 0. For
// multicast TMA of size 4, CTAs will be mapped to {0,1,2,3}.
template <class TmaInternalType,
class CopyOp,
class GEngine, class GLayout,
class SLayout,
class TShape, class TStride,
class VShape, class VStride>
CUTE_HOST_RTC
auto
make_tma_copy_tiled(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor, // Full GMEM Tensor
SLayout const& slayout, // CTA Tile of SMEM
Layout<TShape,TStride> const& cta_t_map, // T: CTA thr idx -> logical TMA tid
Layout<VShape,VStride> const& cta_v_map) // V: CTA val idx -> gmem mode
{
Copy_Atom atom = make_tma_copy_atom<TmaInternalType>(copy_op, gtensor, slayout,
cosize(cta_t_map), cta_v_map);
//
// Construct the TiledCopy
//
[[maybe_unused]] auto cta_tiler = product_each(shape(cta_v_map));
auto num_elems_per_tma = size<1>(typename decltype(atom)::RefLayout{}) / static_value<sizeof_bits<typename GEngine::value_type>>();
// smem idx -> smem coord
auto inv_smem_layout = right_inverse(get_nonswizzle_portion(slayout));
// CTA V -> smem_coord
auto layout_v = composition(inv_smem_layout, num_elems_per_tma);
// Scale that up to cover all of the smem_coords
auto layout_V = tile_to_shape(make_layout(layout_v), size(cta_v_map));
// CTA T -> smem idx
auto layout_t = make_layout(cosize(cta_t_map), shape_div(num_elems_per_tma, cosize(cta_t_map)));
// CTA TID -> smem coord
auto layout_T = composition(inv_smem_layout, composition(layout_t, cta_t_map));
// Combine with the T mapping
[[maybe_unused]] auto layout_TV = make_layout(layout_T, layout_V);
#if 0
print("cta_tiler : "); print(cta_tiler); print("\n");
print("layout_v : "); print(layout_v); print("\n");
print("layout_V : "); print(layout_V); print("\n");
print("layout_t : "); print(layout_t); print("\n");
print("layout_T : "); print(layout_T); print("\n");
print("layout_TV : "); print(layout_TV); print("\n");
#endif
return TiledCopy<decltype(atom), decltype(layout_TV), decltype(cta_tiler)>{atom};
}
} // end namespace detail
/** Make a CuTe CTA-collective TiledCopy for a TMA operation.
*
* @param CopyOp The target copy operation: SM90_TMA_LOAD, SM90_TMA_LOAD_MULTICAST, SM90_TMA_STORE
* @param gtensor The GMEM Tensor to be involved in the TMA.
* @param slayout The SMEM Layout to be involved in the TMA.
* @param cta_tile The CTA-local tile that each CTA will be tiling GMEM with.
* This is often the blk_shape that is used to tile the GMEM for CTAs:
* local_tile(gtensor, blk_shape, blk_coord) -> CTA-local tile of gtensor
* @param cluster_size When using SM90_TMA_LOAD_MULTICAST, this can be a (static) power-of-2 <= 16
* defining the multicast size (used to further partition the SMEM)
* Else, static-1
*
* This code attempts to maximize the TMA box size. It does this by tracing
* the SMEM "vector" -- the inverse of the smem layout -- to find the largest
* contiguous array of smem that can be written to/from global memory given
* the constraints that the TMA instruction imposes.
*
* This is accomplished by assigning "basis" strides to the GMEM to track which
* modes of SMEM map to which modes of GMEM, reordering the modes of GMEM according
* to the SMEM vector, and then using those GMEM/SMEM modes to fill in the TMA descriptor.
*
* Examples:
using T = float;
T* gptr = nullptr;
{
// Simple 2D
Tensor gtensor = make_tensor(gptr, make_shape(1024, 256), GenRowMajor{}); // K-Major GMEM
auto slayout = make_layout(make_shape(_64{}, _32{}), GenRowMajor{}); // K-Major SMEM
auto tma = make_tma_copy(SM90_TMA_LOAD{}, gtensor, slayout);
}
{
// GMMA 2D
Tensor gtensor = make_tensor(gptr, make_shape(1024, 256)); // MN-Major GMEM
auto slayout = tile_to_shape(GMMA::Layout_MN_SW128_Atom<T>{}, make_shape(_128{},_64{})); // MN-Major Swizzled+Tiled 128x64 SMEM
auto tma = make_tma_copy(SM90_TMA_LOAD{}, gtensor, slayout);
}
{
// 3D
Tensor gtensor = make_tensor(gptr, make_shape(1024, 32, 512), make_stride(64, Int<1>{}, 65536)); // GMEM
auto slayout = make_layout(make_shape(_16{}, _8{}, _2{}), make_stride(_16{}, _1{}, _8{})); // SMEM w/ same major-mode
auto tma = make_tma_copy(SM90_TMA_LOAD{}, gtensor, slayout);
}
{
// cuTENSOR 4D
Tensor gtensor = make_tensor(gptr, make_shape(make_shape(32,40),make_shape(make_shape(8,8),656)));   // GMEM
auto cta_tile = make_shape(_128{},make_shape(_32{},_2{})); // GMEM Tiling:
// Take 128-elem from m: m0 must divide 128,
// m-last may be predicated
// Take 32-elem from k0, 2-elem from k1
auto slayout = make_layout(cta_tile); // Col-Major SMEM
auto tma = make_tma_copy(SM90_TMA_LOAD{}, gtensor, slayout, cta_tile, Int<1>{});
}
*
* Check the TMA box size and desc:
print("TMA Box size: "); print(typename decltype(tma)::Tiler_MN{}); print("\n");
print("TMA desc : "); print(tma.tma_desc_); print("\n");
*
* Usage:
Tensor mA = tma_a.get_tma_tensor(make_shape(M,N)); // (M,N) TMA coord tensor
Tensor gA = local_tile(mA, cta_tile, cta_coord); // (BLK_M,BLK_N) TMA coord tensor for this CTA
Tensor sA = make_tensor(make_smem_ptr<T>(sptr), slayout); // (BLK_M,BLK_N) SMEM tensor
auto cta_tma = tma.get_slice(cta_idx_in_cluster); // Slice for multicast partitioning
Tensor tAgA = cta_tma.partition_S(gA); // Partition for src
Tensor tAsA = cta_tma.partition_D(sA); // Partition for dst
copy(tma.with(barrier, mcast_mask), tAgA, tAsA); // copy with supporting TMA params
*/
template <class TmaInternalType = void,
class CopyOp,
class GEngine, class GLayout,
class SLayout,
class CTA_Tiler,
class Cluster_Size>
CUTE_HOST_RTC
auto
make_tma_copy(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor,
SLayout const& slayout,
CTA_Tiler const& cta_tiler,
Cluster_Size const& cluster_size)
{
if constexpr (cute::is_same_v<CopyOp, SM90_TMA_LOAD_IM2COL> ||
cute::is_same_v<CopyOp, SM90_TMA_STORE_IM2COL>) {
return make_im2col_tma_copy(copy_op,
gtensor,
slayout,
cta_tiler,
cluster_size);
} else {
auto cta_v_tile = make_identity_layout(shape(gtensor)).compose(cta_tiler);
auto cta_t_tile = make_layout(cluster_size);
// Prefer TmaInternalType if specified. Fallback to GEngine::value_type
using TmaType = conditional_t<is_same<void, TmaInternalType>::value, typename GEngine::value_type, TmaInternalType>;
return detail::make_tma_copy_tiled<TmaType>(copy_op,
gtensor, slayout,
cta_t_tile, cta_v_tile);
}
}
// Explicit defaulting
template <class CopyOp,
class GEngine, class GLayout,
class SLayout>
CUTE_HOST_RTC
auto
make_tma_copy(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor,
SLayout const& slayout)
{
return make_tma_copy(copy_op, gtensor, slayout, product_each(shape(slayout)), Int<1>{});
}
// Explicit defaulting
template <class CopyOp,
class GEngine, class GLayout,
class SLayout,
class Cluster_Size>
CUTE_HOST_RTC
auto
make_tma_copy(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor,
SLayout const& slayout,
Cluster_Size const& cluster_size)
{
return make_tma_copy(copy_op, gtensor, slayout, product_each(shape(slayout)), cluster_size);
}
////////////////////////////////////
// Experimental Make TMA Atom and Partitioner
///////////////////////////////////
template <class TmaInternalType = void,
class CopyOp,
class GEngine, class GLayout,
class SLayout,
class CTA_Tiler,
class Cluster_Size>
CUTE_HOST_RTC
auto
make_tma_atom(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor,
SLayout const& slayout,
CTA_Tiler const& cta_tiler,
Cluster_Size const& cluster_size)
{
auto cta_v_tile = make_identity_layout(shape(gtensor)).compose(cta_tiler);
// Prefer TmaInternalType if specified. Fallback to GEngine::value_type
using TmaType = conditional_t<is_same<void, TmaInternalType>::value, typename GEngine::value_type, TmaInternalType>;
return detail::make_tma_copy_atom<TmaType>(copy_op,
gtensor, slayout,
size(cluster_size), cta_v_tile);
}
// The "VectorCopy Partitioner" for TMA
template <class... Args,
class CtaCoord,
class TShape, class TStride,
class SEngine, class SLayout,
class GEngine, class GLayout>
CUTE_DEVICE
auto
tma_partition(Copy_Atom<Args...> const& copy_atom,
CtaCoord const& cta_coord,
Layout<TShape,TStride> const& cta_layout, // T: CTA coord -> logical multicast id
Tensor<SEngine,SLayout> const& stensor, // SMEM Tensor (TMATile, Rest...)
Tensor<GEngine,GLayout> const& gtensor) // GMEM Tensor (TMATile, Rest...)
{
CUTE_STATIC_ASSERT_V(size<0>(stensor) == size<0>(gtensor));
// Invert the smem to get the largest contiguous vector in the smem layout
Layout inv_smem_layout = right_inverse(get_nonswizzle_portion(layout<0>(stensor)));
// Scale that up to cover all of the smem_coords
Layout layout_v = tile_to_shape(make_layout(inv_smem_layout), size<0>(stensor));
// Factor out the single-instruction portion
Layout tma_layout_v = make_layout(Int<Copy_Atom<Args...>::NumValSrc>{});
auto layout_V = make_tile(logical_divide(layout_v, tma_layout_v));
// Append with _ until we cover all Rest... modes
auto glayout_V = append<rank_v<decltype(gtensor)>>(layout_V, _);
auto slayout_V = append<rank_v<decltype(stensor)>>(layout_V, _);
// Transform tile mode and coalesce
Tensor gtensor_v = coalesce(gtensor.compose(glayout_V), Shape<Shape<_1,_1>>{}); // ((TMA,TMA_Iter), Rest...)
Tensor stensor_v = coalesce(stensor.compose(slayout_V), Shape<Shape<_1,_1>>{}); // ((TMA,TMA_Iter), Rest...)
#if 0
if (thread0()) {
print("cta_coord : "); print(cta_coord); print("\n");
print("cta_layout : "); print(cta_layout); print("\n");
print("gtensor : "); print(gtensor); print("\n");
print("stensor : "); print(stensor); print("\n");
print("layout_V : "); print(layout_V); print("\n");
print("gtensor_v : "); print(gtensor_v); print("\n");
print("stensor_v : "); print(stensor_v); print("\n");
}
#endif
// Offset inside the TMA-mode for the multicast
auto multicast_offset = cta_layout(cta_coord) * (size(tma_layout_v) / cosize(cta_layout));
auto multicast_coord = make_coord(make_coord(multicast_offset, Int<0>{}));
auto scoord = append<SLayout::rank>(multicast_coord, Int<0>{});
auto gcoord = append<GLayout::rank>(multicast_coord, Int<0>{});
Tensor gresult = domain_offset(gcoord, gtensor_v);
Tensor sresult = domain_offset(scoord, stensor_v);
return cute::make_tuple(gresult, sresult);
}
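// Usage sketch (illustrative assumption, not part of the original source), following the
// shape comments above: stensor/gtensor carry the TMA tile in mode-0 and arbitrary
// Rest... modes (e.g. pipeline stage for SMEM, tile iteration for GMEM):
//   // sA : (TMA_Tile, PIPE) SMEM tensor,  gA : (TMA_Tile, k_tile) GMEM coord tensor
//   auto [tAgA, tAsA] = tma_partition(tma_atom, cta_coord_in_cluster,
//                                     Layout<Cluster_Size>{}, sA, gA);
//   // tAgA : ((TMA,TMA_Iter), k_tile),  tAsA : ((TMA,TMA_Iter), PIPE)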
// TMA Multicast Masks Calculation
template <int Mode, class CtaLayout, class CtaCoord>
CUTE_HOST_DEVICE constexpr
auto
create_tma_multicast_mask(CtaLayout const& cta_layout_vmnk,
CtaCoord const& cta_coord_vmnk)
{
auto cta_coord_slicer = replace<Mode>(cta_coord_vmnk, _);
auto [cta_layout, elected_cta] = slice_and_offset(cta_coord_slicer, cta_layout_vmnk);
// Build the multicast bitmask over the CTAs in the sliced layout
uint16_t mcast_mask = 0;
for (int i = 0; i < size(cta_layout); ++i) {
mcast_mask |= uint16_t(1) << cta_layout(i);
}
// Shift by the instruction's elected block rank (dynamic)
mcast_mask <<= elected_cta;
return mcast_mask;
}
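// Illustrative note (assumption, not part of the original source): for a cluster layout
// of shape (V,M,N,K), create_tma_multicast_mask<2>(cta_layout_vmnk, cta_coord_vmnk)
// sets one bit per CTA that shares this CTA's V, M, and K coordinates -- i.e. the
// peers along the N mode that participate in the same multicast load.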
} // end namespace cute
|
cutlass/include/cute/atom/copy_traits_sm90_tma.hpp/0
|
{
"file_path": "cutlass/include/cute/atom/copy_traits_sm90_tma.hpp",
"repo_id": "cutlass",
"token_count": 25399
}
| 20 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <vector_types.h>
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/numeric/integral_constant.hpp>
namespace cute
{
//
// dim3
//
using dim3 = ::dim3;
// MSVC doesn't define its C++ version macro to match
// its C++ language version. This means that when
// building with MSVC, dim3 isn't constexpr-friendly.
template <size_t I>
CUTE_HOST_DEVICE
#if ! defined(_MSC_VER)
constexpr
#endif
uint32_t& get(dim3& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return a.x;
} else if constexpr (I == 1) {
return a.y;
} else if constexpr (I == 2) {
return a.z;
}
CUTE_GCC_UNREACHABLE;
}
template <size_t I>
CUTE_HOST_DEVICE
#if ! defined(_MSC_VER)
constexpr
#endif
uint32_t const& get(dim3 const& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return a.x;
} else if constexpr (I == 1) {
return a.y;
} else if constexpr (I == 2) {
return a.z;
}
CUTE_GCC_UNREACHABLE;
}
template <size_t I>
CUTE_HOST_DEVICE
#if ! defined(_MSC_VER)
constexpr
#endif
uint32_t&& get(dim3&& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return cute::move(a.x);
} else if constexpr (I == 1) {
return cute::move(a.y);
} else if constexpr (I == 2) {
return cute::move(a.z);
}
CUTE_GCC_UNREACHABLE;
}
// Specialize cute::tuple-traits for external types
template <>
struct tuple_size<dim3>
: integral_constant<size_t, 3>
{};
template <size_t I>
struct tuple_element<I, dim3>
{
using type = uint32_t;
};
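// Usage sketch (illustrative, not part of the original source): with the get<I> overloads
// and trait specializations above, dim3 behaves like a static-rank tuple of uint32_t:
//   dim3 grid{128, 2, 1};
//   uint32_t gx = cute::get<0>(grid);                       // 128
//   static_assert(cute::tuple_size<dim3>::value == 3, "");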
//
// uint3
//
using uint3 = ::uint3;
template <size_t I>
CUTE_HOST_DEVICE constexpr
uint32_t& get(uint3& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return a.x;
} else if constexpr (I == 1) {
return a.y;
} else if constexpr (I == 2) {
return a.z;
}
CUTE_GCC_UNREACHABLE;
}
template <size_t I>
CUTE_HOST_DEVICE constexpr
uint32_t const& get(uint3 const& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return a.x;
} else if constexpr (I == 1) {
return a.y;
} else if constexpr (I == 2) {
return a.z;
}
CUTE_GCC_UNREACHABLE;
}
template <size_t I>
CUTE_HOST_DEVICE constexpr
uint32_t&& get(uint3&& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return cute::move(a.x);
} else if constexpr (I == 1) {
return cute::move(a.y);
} else if constexpr (I == 2) {
return cute::move(a.z);
}
CUTE_GCC_UNREACHABLE;
}
// Specialize cute::tuple-traits for external types
template <>
struct tuple_size<uint3>
: integral_constant<size_t, 3>
{};
template <size_t I>
struct tuple_element<I, uint3>
{
using type = uint32_t;
};
} // end namespace cute
|
cutlass/include/cute/container/cuda_types.hpp/0
|
{
"file_path": "cutlass/include/cute/container/cuda_types.hpp",
"repo_id": "cutlass",
"token_count": 1651
}
| 21 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/numeric/numeric_types.hpp> // sizeof_bits
namespace cute
{
//
// C++20 <iterator> iterator_traits
//
namespace detail {
// Default reference type of an iterator
template <class T, class = void>
struct iter_ref { using type = decltype(*declval<T&>()); };
// Prefer to propagate ::reference
template <class T>
struct iter_ref<T,void_t<typename T::reference>> { using type = typename T::reference; };
} // end namespace detail
template <class T>
using iter_reference = detail::iter_ref<T>;
template <class T>
using iter_reference_t = typename iter_reference<T>::type;
namespace detail {
// Default element_type of an iterator
template <class T, class = void>
struct iter_e { using type = remove_reference_t<typename iter_ref<T>::type>; };
// Prefer to propagate ::element_type
template <class T>
struct iter_e<T,void_t<typename T::element_type>> { using type = typename T::element_type; };
} // end namespace detail
template <class T>
using iter_element = detail::iter_e<T>;
template <class T>
using iter_element_t = typename iter_element<T>::type;
namespace detail {
// Default value_type of an iterator
template <class T, class = void>
struct iter_v { using type = remove_cv_t<typename iter_e<T>::type>; };
// Prefer to propagate ::value_type
template <class T>
struct iter_v<T,void_t<typename T::value_type>> { using type = typename T::value_type; };
} // end namespace detail
template <class T>
using iter_value = detail::iter_v<T>;
template <class T>
using iter_value_t = typename iter_value<T>::type;
template <class Iterator>
struct iterator_traits {
using reference = iter_reference_t<Iterator>;
using element_type = iter_element_t<Iterator>;
using value_type = iter_value_t<Iterator>;
};
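// Illustrative note (not part of the original source): for a raw pointer the defaults
// above resolve to
//   iter_reference_t<float const*> == float const&
//   iter_element_t<float const*>   == float const
//   iter_value_t<float const*>     == float
// whereas an iterator class that defines ::reference, ::element_type, or ::value_type
// has those nested typedefs propagated instead.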
//
// has_dereference to determine whether a type models the iterator concept
//
namespace detail {
template <class T, class = void>
struct has_dereference : CUTE_STL_NAMESPACE::false_type {};
template <class T>
struct has_dereference<T, void_t<decltype(*declval<T&>())>> : CUTE_STL_NAMESPACE::true_type {};
} // end namespace detail
template <class T>
using has_dereference = detail::has_dereference<T>;
//
// raw_pointer_cast
//
template <class T>
CUTE_HOST_DEVICE constexpr
T*
raw_pointer_cast(T* ptr) {
return ptr;
}
//
// A very simplified iterator adaptor.
// Derived classes may override methods, but be careful to reproduce interfaces exactly.
// Clients should never have an instance of this class. Do not write methods that take this as a param.
//
template <class Iterator, class DerivedType>
struct iter_adaptor
{
using iterator = Iterator;
using reference = typename iterator_traits<iterator>::reference;
using element_type = typename iterator_traits<iterator>::element_type;
using value_type = typename iterator_traits<iterator>::value_type;
iterator ptr_;
CUTE_HOST_DEVICE constexpr
iter_adaptor(iterator ptr = {}) : ptr_(ptr) {}
CUTE_HOST_DEVICE constexpr
reference operator*() const { return *ptr_; }
template <class Index>
CUTE_HOST_DEVICE constexpr
reference operator[](Index const& i) const { return ptr_[i]; }
template <class Index>
CUTE_HOST_DEVICE constexpr
DerivedType operator+(Index const& i) const { return {ptr_ + i}; }
CUTE_HOST_DEVICE constexpr
iterator get() const { return ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator==(DerivedType const& x, DerivedType const& y) { return x.ptr_ == y.ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator!=(DerivedType const& x, DerivedType const& y) { return x.ptr_ != y.ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator< (DerivedType const& x, DerivedType const& y) { return x.ptr_ < y.ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator<=(DerivedType const& x, DerivedType const& y) { return x.ptr_ <= y.ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator> (DerivedType const& x, DerivedType const& y) { return x.ptr_ > y.ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator>=(DerivedType const& x, DerivedType const& y) { return x.ptr_ >= y.ptr_; }
};
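// Minimal derived-iterator sketch (illustrative assumption, not part of the original
// source): the derived class names itself as DerivedType so that operator+ and the
// comparison operators traffic in the derived type.
//   template <class Iterator>
//   struct tagged_iter : iter_adaptor<Iterator, tagged_iter<Iterator>> {
//     using iter_adaptor<Iterator, tagged_iter<Iterator>>::iter_adaptor;
//   };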
template <class I, class D>
CUTE_HOST_DEVICE constexpr
auto
raw_pointer_cast(iter_adaptor<I,D> const& x) {
return raw_pointer_cast(x.ptr_);
}
//
// counting iterator -- quick and dirty
//
template <class T = int>
struct counting_iterator
{
using index_type = T;
using value_type = T;
using reference = T;
index_type n_;
CUTE_HOST_DEVICE constexpr
counting_iterator(index_type n = 0) : n_(n) {}
CUTE_HOST_DEVICE constexpr
index_type operator*() const { return n_; }
CUTE_HOST_DEVICE constexpr
index_type operator[](index_type i) const { return n_ + i; }
CUTE_HOST_DEVICE constexpr
counting_iterator operator+(index_type i) const { return {n_ + i}; }
CUTE_HOST_DEVICE constexpr
counting_iterator& operator++() { ++n_; return *this; }
CUTE_HOST_DEVICE constexpr
counting_iterator operator++(int) { counting_iterator ret = *this; ++n_; return ret; }
CUTE_HOST_DEVICE constexpr
friend bool operator==(counting_iterator const& x, counting_iterator const& y) { return x.n_ == y.n_; }
CUTE_HOST_DEVICE constexpr
friend bool operator!=(counting_iterator const& x, counting_iterator const& y) { return x.n_ != y.n_; }
CUTE_HOST_DEVICE constexpr
friend bool operator< (counting_iterator const& x, counting_iterator const& y) { return x.n_ < y.n_; }
CUTE_HOST_DEVICE constexpr
friend bool operator<=(counting_iterator const& x, counting_iterator const& y) { return x.n_ <= y.n_; }
CUTE_HOST_DEVICE constexpr
friend bool operator> (counting_iterator const& x, counting_iterator const& y) { return x.n_ > y.n_; }
CUTE_HOST_DEVICE constexpr
friend bool operator>=(counting_iterator const& x, counting_iterator const& y) { return x.n_ >= y.n_; }
};
template <class T>
CUTE_HOST_DEVICE constexpr
T
raw_pointer_cast(counting_iterator<T> const& x) {
return x.n_;
}
//
// Display utilities
//
template <class T>
CUTE_HOST_DEVICE void print(T const* const ptr)
{
printf("ptr["); print(sizeof_bits<T>::value); printf("b](%p)", ptr);
}
template <class T>
CUTE_HOST_DEVICE void print(counting_iterator<T> ptr)
{
printf("counting_iter("); print(ptr.n_); printf(")");
}
#if !defined(__CUDACC_RTC__)
template <class T>
CUTE_HOST std::ostream& operator<<(std::ostream& os, counting_iterator<T> ptr)
{
return os << "counting_iter(" << ptr.n_ << ")";
}
#endif // !defined(__CUDACC_RTC__)
} // end namespace cute
|
cutlass/include/cute/pointer_base.hpp/0
|
{
"file_path": "cutlass/include/cute/pointer_base.hpp",
"repo_id": "cutlass",
"token_count": 2732
}
| 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Architecture-specific operators on memory
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/platform/platform.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Fragment type to store loaded data
typename AccessType,
/// Number of bytes per load
int LoadBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always
>
struct global_load;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
#if (((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 4)) || \
(__CUDACC_VER_MAJOR__ > 11)) && \
defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750)
#define CUTLASS_ENABLE_L2_PREFETCH 1
#else
#define CUTLASS_ENABLE_L2_PREFETCH 0
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
// The redundant mov PTX instructions are used to force the compiler to
// keep the initializing code before ld.global
template <typename AccessType>
struct global_load<AccessType,
32,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 *data = reinterpret_cast<uint4 *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %9, 0;\n"
" mov.b32 %0, %10;\n"
" mov.b32 %1, %11;\n"
" mov.b32 %2, %12;\n"
" mov.b32 %3, %13;\n"
" mov.b32 %4, %14;\n"
" mov.b32 %5, %15;\n"
" mov.b32 %6, %16;\n"
" mov.b32 %7, %17;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v4.u32 {%0, %1, %2, %3}, [%8];\n"
" @p ld.global.L2::128B.v4.u32 {%4, %5, %6, %7}, [%18];\n"
#else
" @p ld.global.v4.u32 {%0, %1, %2, %3}, [%8];\n"
" @p ld.global.v4.u32 {%4, %5, %6, %7}, [%18];\n"
#endif
"}\n"
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w),
"=r"(data[1].x), "=r"(data[1].y), "=r"(data[1].z), "=r"(data[1].w)
: "l"(ptr), "r"((int)pred_guard), "r"(data[0].x), "r"(data[0].y),
"r"(data[0].z), "r"(data[0].w), "r"(data[1].x), "r"(data[1].y),
"r"(data[1].z), "r"(data[1].w), "l"(((uint8_t *)ptr) + 16));
}
};
template <typename AccessType>
struct global_load<AccessType,
32,
CacheOperation::LastUse
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 *data = reinterpret_cast<uint4 *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %9, 0;\n"
" mov.b32 %0, %10;\n"
" mov.b32 %1, %11;\n"
" mov.b32 %2, %12;\n"
" mov.b32 %3, %13;\n"
" mov.b32 %4, %14;\n"
" mov.b32 %5, %15;\n"
" mov.b32 %6, %16;\n"
" mov.b32 %7, %17;\n"
" @p ld.global.lu.v4.u32 {%0, %1, %2, %3}, [%8];\n"
" @p ld.global.lu.v4.u32 {%4, %5, %6, %7}, [%18];\n"
"}\n"
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w),
"=r"(data[1].x), "=r"(data[1].y), "=r"(data[1].z), "=r"(data[1].w)
: "l"(ptr), "r"((int)pred_guard), "r"(data[0].x), "r"(data[0].y),
"r"(data[0].z), "r"(data[0].w), "r"(data[1].x), "r"(data[1].y),
"r"(data[1].z), "r"(data[1].w), "l"(((uint8_t *)ptr) + 16));
}
};
template <typename AccessType>
struct global_load<AccessType,
16,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 &data = reinterpret_cast<uint4 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" mov.b32 %0, %6;\n"
" mov.b32 %1, %7;\n"
" mov.b32 %2, %8;\n"
" mov.b32 %3, %9;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v4.u32 {%0, %1, %2, %3}, [%4];\n"
#else
" @p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n"
#endif
"}\n"
: "=r"(data.x), "=r"(data.y), "=r"(data.z), "=r"(data.w)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y), "r"(data.z), "r"(data.w));
}
};
template <typename AccessType>
struct global_load<AccessType,
16,
CacheOperation::LastUse
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 &data = reinterpret_cast<uint4 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" mov.b32 %0, %6;\n"
" mov.b32 %1, %7;\n"
" mov.b32 %2, %8;\n"
" mov.b32 %3, %9;\n"
" @p ld.global.lu.v4.u32 {%0, %1, %2, %3}, [%4];\n"
"}\n"
: "=r"(data.x), "=r"(data.y), "=r"(data.z), "=r"(data.w)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y), "r"(data.z), "r"(data.w));
}
};
template <typename AccessType>
struct global_load<AccessType,
8,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint2 &data = reinterpret_cast<uint2 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
" mov.b32 %0, %4;\n"
" mov.b32 %1, %5;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v2.u32 {%0, %1}, [%2];\n"
#else
" @p ld.global.v2.u32 {%0, %1}, [%2];\n"
#endif
"}\n"
: "=r"(data.x), "=r"(data.y)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y));
}
};
template <typename AccessType>
struct global_load<AccessType,
8,
CacheOperation::LastUse
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint2 &data = reinterpret_cast<uint2 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
" mov.b32 %0, %4;\n"
" mov.b32 %1, %5;\n"
" @p ld.global.lu.v2.u32 {%0, %1}, [%2];\n"
"}\n"
: "=r"(data.x), "=r"(data.y)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y));
}
};
template <typename AccessType>
struct global_load<AccessType,
4,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
unsigned &data = reinterpret_cast<unsigned &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b32 %0, %3;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.u32 %0, [%1];\n"
#else
" @p ld.global.u32 %0, [%1];\n"
#endif
"}\n"
: "=r"(data)
: "l"(ptr), "r"((int)pred_guard), "r"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
4,
CacheOperation::LastUse
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
unsigned &data = reinterpret_cast<unsigned &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b32 %0, %3;\n"
" @p ld.global.lu.u32 %0, [%1];\n"
"}\n"
: "=r"(data)
: "l"(ptr), "r"((int)pred_guard), "r"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
2,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint16_t &data = reinterpret_cast<uint16_t &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b16 %0, %3;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.u16 %0, [%1];\n"
#else
" @p ld.global.u16 %0, [%1];\n"
#endif
"}\n"
: "=h"(data)
: "l"(ptr), "r"((int)pred_guard), "h"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
2,
CacheOperation::LastUse
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint16_t &data = reinterpret_cast<uint16_t &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b16 %0, %3;\n"
" @p ld.global.lu.u16 %0, [%1];\n"
"}\n"
: "=h"(data)
: "l"(ptr), "r"((int)pred_guard), "h"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
1,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
if (pred_guard) D = *(reinterpret_cast<AccessType const *>(ptr));
}
};
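// Usage sketch (illustrative assumption, not part of the original source): a predicated
// 16-byte vectorized load of four floats into a register fragment. Fragment, gmem_ptr,
// idx, and extent are hypothetical names.
//   using Fragment = cutlass::Array<float, 4>;                     // 16 bytes
//   Fragment frag;
//   bool guard = (idx < extent);                                   // predicate off-bounds accesses
//   cutlass::arch::global_load<Fragment, sizeof(Fragment)>(frag, gmem_ptr + idx, guard);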
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Fragment type to store data
typename AccessType,
/// Number of bytes per store
int StoreBytes
>
struct global_store;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct global_store<AccessType, 64> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const *data = reinterpret_cast<uint4 const *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
" @p st.global.v4.u32 [%6], {%7, %8, %9, %10};\n"
" @p st.global.v4.u32 [%11], {%12, %13, %14, %15};\n"
" @p st.global.v4.u32 [%16], {%17, %18, %19, %20};\n"
"}\n"
:
: "l"(ptr), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z),
"r"(data[0].w), "r"((int)pred_guard), "l"(((uint8_t *)ptr) + 16),
"r"(data[1].x), "r"(data[1].y), "r"(data[1].z), "r"(data[1].w),
"l"(((uint8_t *)ptr) + 32),
"r"(data[2].x), "r"(data[2].y), "r"(data[2].z), "r"(data[2].w),
"l"(((uint8_t *)ptr) + 48),
"r"(data[3].x), "r"(data[3].y), "r"(data[3].z), "r"(data[3].w));
}
};
template <typename AccessType>
struct global_store<AccessType, 32> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const *data = reinterpret_cast<uint4 const *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
" @p st.global.v4.u32 [%6], {%7, %8, %9, %10};\n"
"}\n"
:
: "l"(ptr), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z),
"r"(data[0].w), "r"((int)pred_guard), "l"(((uint8_t *)ptr) + 16),
"r"(data[1].x), "r"(data[1].y), "r"(data[1].z), "r"(data[1].w));
}
};
template <typename AccessType>
struct global_store<AccessType, 16> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const &data = reinterpret_cast<uint4 const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
"}\n"
:
: "l"(ptr), "r"(data.x), "r"(data.y), "r"(data.z), "r"(data.w), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 8> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint2 const &data = reinterpret_cast<uint2 const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
" @p st.global.v2.u32 [%0], {%1, %2};\n"
"}\n"
:
: "l"(ptr), "r"(data.x), "r"(data.y), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 4> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint32_t const &data = reinterpret_cast<uint32_t const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" @p st.global.u32 [%0], %1;\n"
"}\n"
:
: "l"(ptr), "r"(data), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 2> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint16_t const &data = reinterpret_cast<uint16_t const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" @p st.global.u16 [%0], %1;\n"
"}\n"
:
: "l"(ptr), "h"(data), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 1> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
if (pred_guard) *(reinterpret_cast<AccessType *>(ptr)) = D;
}
};
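// Usage sketch (illustrative assumption, not part of the original source): the matching
// predicated 16-byte store of the same hypothetical fragment back to global memory.
//   cutlass::arch::global_store<Fragment, sizeof(Fragment)>(frag, gmem_ptr + idx, guard);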
/////////////////////////////////////////////////////////////////////////////////////////////////
/// ld.shared
template <int Bytes>
CUTLASS_DEVICE
void shared_load(void *dst, uint32_t ptr);
/// ld.shared - 16b
template <>
CUTLASS_DEVICE
void shared_load<2>(void *dst, uint32_t ptr) {
asm volatile("ld.shared.u16 %0, [%1];\n"
: "=h"(*reinterpret_cast<uint16_t *>(dst))
: "r"(ptr));
}
/// ld.shared - 32b
template <>
CUTLASS_DEVICE
void shared_load<4>(void *dst, uint32_t ptr) {
asm volatile("ld.shared.u32 %0, [%1];\n"
: "=r"(*reinterpret_cast<uint32_t *>(dst))
: "r"(ptr));
}
/// ld.shared - 64b
template <>
CUTLASS_DEVICE
void shared_load<8>(void *dst, uint32_t ptr) {
uint2 *dst_u64 = reinterpret_cast<uint2 *>(dst);
asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n"
:
"=r"(dst_u64->x),
"=r"(dst_u64->y)
: "r"(ptr));
}
/// ld.shared - 128b
template <>
CUTLASS_DEVICE
void shared_load<16>(void *dst, uint32_t ptr) {
uint4 *dst_u128 = reinterpret_cast<uint4 *>(dst);
asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
:
"=r"(dst_u128->x),
"=r"(dst_u128->y),
"=r"(dst_u128->z),
"=r"(dst_u128->w)
: "r"(ptr));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// st.shared
template <int Bytes>
CUTLASS_DEVICE
void shared_store(uint32_t ptr, void const *src);
/// st.shared - 16b
template <>
CUTLASS_DEVICE
void shared_store<2>(uint32_t ptr, void const *src) {
asm volatile("st.shared.u16 [%0], %1;\n"
: :
"r"(ptr),
"h"(*reinterpret_cast<uint16_t const *>(src))
);
}
/// st.shared - 32b
template <>
CUTLASS_DEVICE
void shared_store<4>(uint32_t ptr, void const *src) {
asm volatile("st.shared.u32 [%0], %1;\n"
: :
"r"(ptr),
"r"(*reinterpret_cast<uint32_t const *>(src))
);
}
/// st.shared - 64b
template <>
CUTLASS_DEVICE
void shared_store<8>(uint32_t ptr, void const *src) {
uint2 const *dst_u64 = reinterpret_cast<uint2 const *>(src);
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(ptr),
"r"(dst_u64->x),
"r"(dst_u64->y)
);
}
/// st.shared - 128b
template <>
CUTLASS_DEVICE
void shared_store<16>(uint32_t ptr, void const *src) {
uint4 const *dst_u128 = reinterpret_cast<uint4 const *>(src);
asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
: :
"r"(ptr),
"r"(dst_u128->x),
"r"(dst_u128->y),
"r"(dst_u128->z),
"r"(dst_u128->w)
);
}
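// Usage sketch (illustrative assumption, not part of the original source): shared_load and
// shared_store take a 32-bit shared-memory address, e.g. obtained from a generic pointer
// with the CUDA intrinsic __cvta_generic_to_shared:
//   __shared__ __align__(16) float tile[256];
//   uint32_t smem_addr = static_cast<uint32_t>(__cvta_generic_to_shared(tile + 4 * threadIdx.x));
//   float4 v;
//   cutlass::arch::shared_load<16>(&v, smem_addr);    // 128-bit ld.shared
//   cutlass::arch::shared_store<16>(smem_addr, &v);   // 128-bit st.shared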
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/memory_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/arch/memory.h/0
|
{
"file_path": "cutlass/include/cutlass/arch/memory.h",
"repo_id": "cutlass",
"token_count": 8863
}
| 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Boost-style constant definitions for floating-point types.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/complex.h"
///////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace constants {
///////////////////////////////////////////////////////////////////////////////////
//
// Primary templates
//
/// Returns 1, the multiplicative identity element
template <typename T> CUTLASS_HOST_DEVICE T one();
/// Returns 0, the additive identity element
template <typename T> CUTLASS_HOST_DEVICE T zero();
/// Returns 2
template <typename T> CUTLASS_HOST_DEVICE T two();
/// Returns pi, approximately 3.141
template <typename T> CUTLASS_HOST_DEVICE T pi();
/// Returns 2 * pi
template <typename T> CUTLASS_HOST_DEVICE T two_pi();
/// Returns pi / 2
template <typename T> CUTLASS_HOST_DEVICE T half_pi();
/// Returns sqrt(pi)
template <typename T> CUTLASS_HOST_DEVICE T root_pi();
/// Returns sqrt(pi / 2)
template <typename T> CUTLASS_HOST_DEVICE T root_half_pi();
/// Returns sqrt(2 * pi)
template <typename T> CUTLASS_HOST_DEVICE T root_two_pi();
/// Returns sqrt(ln(4))
template <typename T> CUTLASS_HOST_DEVICE T root_ln_four();
/// Returns e, approximately 2.718...
template <typename T> CUTLASS_HOST_DEVICE T e();
/// Returns (1/2)
template <typename T> CUTLASS_HOST_DEVICE T half();
/// Returns sqrt(2), approximately 1.414...
template <typename T> CUTLASS_HOST_DEVICE T root_two();
/// Returns sqrt(2)/2, approximately 0.707...
template <typename T> CUTLASS_HOST_DEVICE T half_root_two();
/// Returns ln(2), approximately 0.693...
template <typename T> CUTLASS_HOST_DEVICE T ln_two();
/// Returns ln(ln(2)), approximately -0.3665...
template <typename T> CUTLASS_HOST_DEVICE T ln_ln_two();
/// Returns 1/3, approximately 0.333...
template <typename T> CUTLASS_HOST_DEVICE T third();
/// Returns 2/3, approximately 0.666...
template <typename T> CUTLASS_HOST_DEVICE T twothirds();
/// Returns pi - 3, approximately 0.1416...
template <typename T> CUTLASS_HOST_DEVICE T pi_minus_three();
/// Returns 4 - pi, approximately 0.858...
template <typename T> CUTLASS_HOST_DEVICE T four_minus_pi();
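// Usage sketch (illustrative, not part of the original source): these helpers provide
// bit-exact constants in host and device code without relying on <cmath>:
//   float theta = cutlass::constants::two_pi<float>() * phase;                            // phase is hypothetical
//   cutlass::complex<double> unit = cutlass::constants::one<cutlass::complex<double>>();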
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for double
/// Returns 1, the multiplicative identity element (specialization for double)
template <> CUTLASS_HOST_DEVICE double one<double>() {
uint64_t bits = 0x3ff0000000000000ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> one< complex<double> >() {
return complex<double>(one<double>(), double());
}
/// Returns 0, the additive identity element (specialization for double)
template <> CUTLASS_HOST_DEVICE double zero<double>() {
uint64_t bits = 0x0ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> zero< complex<double> >() {
return complex<double>(zero<double>(), double());
}
/// Returns 2 (specialization for double)
template <> CUTLASS_HOST_DEVICE double two<double>() {
uint64_t bits = 0x4000000000000000ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 2 (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> two< complex<double> >() {
return complex<double>(two<double>(), double());
}
/// Returns pi, approximately 3.141 (specialization for double)
template <> CUTLASS_HOST_DEVICE double pi<double>() {
uint64_t bits = 0x400921fb54442d18ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> pi< complex<double> >() {
return complex<double>(pi<double>(), double());
}
/// Returns 2 * pi (specialization for double)
template <> CUTLASS_HOST_DEVICE double two_pi<double>() {
uint64_t bits = 0x401921fb54442d18ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 2 * pi (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> two_pi< complex<double> >() {
return complex<double>(two_pi<double>(), double());
}
/// Returns pi / 2 (specialization for double)
template <> CUTLASS_HOST_DEVICE double half_pi<double>() {
uint64_t bits = 0x3ff921fb54442d18ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns pi / 2 (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> half_pi< complex<double> >() {
return complex<double>(half_pi<double>(), double());
}
/// Returns sqrt(pi) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_pi<double>() {
uint64_t bits = 0x3ffc5bf891b4ef6aull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_pi< complex<double> >() {
return complex<double>(root_pi<double>(), double());
}
/// Returns sqrt(pi / 2) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_half_pi<double>() {
uint64_t bits = 0x3ff40d931ff62705ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_half_pi< complex<double> >() {
return complex<double>(root_half_pi<double>(), double());
}
/// Returns sqrt(2 * pi) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_two_pi<double>() {
uint64_t bits = 0x40040d931ff62705ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_two_pi< complex<double> >() {
return complex<double>(root_two_pi<double>(), double());
}
/// Returns sqrt(ln(4)) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_ln_four<double>() {
uint64_t bits = 0x3ff2d6abe44afc43ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_ln_four< complex<double> >() {
return complex<double>(root_ln_four<double>(), double());
}
/// Returns e, approximately 2.718... (specialization for double)
template <> CUTLASS_HOST_DEVICE double e<double>() {
uint64_t bits = 0x4005bf0a8b145769ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> e< complex<double> >() {
return complex<double>(e<double>(), double());
}
/// Returns (1/2) (specialization for double)
template <> CUTLASS_HOST_DEVICE double half<double>() {
uint64_t bits = 0x3fe0000000000000ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns (1/2) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> half< complex<double> >() {
return complex<double>(half<double>(), double());
}
/// Returns sqrt(2), approximately 1.414... (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_two<double>() {
uint64_t bits = 0x3ff6a09e667f3bcdull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_two< complex<double> >() {
return complex<double>(root_two<double>(), double());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for double)
template <> CUTLASS_HOST_DEVICE double half_root_two<double>() {
uint64_t bits = 0x3fe6a09e667f3bcdull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> half_root_two< complex<double> >() {
return complex<double>(half_root_two<double>(), double());
}
/// Returns ln(2), approximately 0.693... (specialization for double)
template <> CUTLASS_HOST_DEVICE double ln_two<double>() {
uint64_t bits = 0x3fe62e42fefa39efull;
return reinterpret_cast<double const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> ln_two< complex<double> >() {
return complex<double>(ln_two<double>(), double());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for double)
template <> CUTLASS_HOST_DEVICE double ln_ln_two<double>() {
uint64_t bits = 0xbfd774f29bdd6b9full;
return reinterpret_cast<double const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> ln_ln_two< complex<double> >() {
return complex<double>(ln_ln_two<double>(), double());
}
/// Returns 1/3, approximately 0.333... (specialization for double)
template <> CUTLASS_HOST_DEVICE double third<double>() {
uint64_t bits = 0x3fd5555555555555ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> third< complex<double> >() {
return complex<double>(third<double>(), double());
}
/// Returns 2/3, approximately 0.666... (specialization for double)
template <> CUTLASS_HOST_DEVICE double twothirds<double>() {
uint64_t bits = 0x3fe5555555555555ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> twothirds< complex<double> >() {
return complex<double>(twothirds<double>(), double());
}
/// Returns pi - 3, approximately 0.1416... (specialization for double)
template <> CUTLASS_HOST_DEVICE double pi_minus_three<double>() {
uint64_t bits = 0x3fc21fb54442d180ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> pi_minus_three< complex<double> >() {
return complex<double>(pi_minus_three<double>(), double());
}
/// Returns 4 - pi, approximately 0.858... (specialization for double)
template <> CUTLASS_HOST_DEVICE double four_minus_pi<double>() {
uint64_t bits = 0x3feb7812aeef4ba0ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> four_minus_pi< complex<double> >() {
return complex<double>(four_minus_pi<double>(), double());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for float
/// Returns 1, the multiplicative identity element (specialization for float)
template <> CUTLASS_HOST_DEVICE float one<float>() {
uint32_t bits = 0x3f800000u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> one< complex<float> >() {
return complex<float>(one<float>(), float());
}
/// Returns 0, the additive identity element (specialization for float)
template <> CUTLASS_HOST_DEVICE float zero<float>() {
uint32_t bits = 0x0u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> zero< complex<float> >() {
return complex<float>(zero<float>(), float());
}
/// Returns 2 (specialization for float)
template <> CUTLASS_HOST_DEVICE float two<float>() {
uint32_t bits = 0x40000000u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 2 (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> two< complex<float> >() {
return complex<float>(two<float>(), float());
}
/// Returns pi, approximately 3.141 (specialization for float)
template <> CUTLASS_HOST_DEVICE float pi<float>() {
uint32_t bits = 0x40490fdbu;
return reinterpret_cast<float const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> pi< complex<float> >() {
return complex<float>(pi<float>(), float());
}
/// Returns 2 * pi (specialization for float)
template <> CUTLASS_HOST_DEVICE float two_pi<float>() {
uint32_t bits = 0x40c90fdbu;
return reinterpret_cast<float const &>(bits);
}
/// Returns 2 * pi (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> two_pi< complex<float> >() {
return complex<float>(two_pi<float>(), float());
}
/// Returns pi / 2 (specialization for float)
template <> CUTLASS_HOST_DEVICE float half_pi<float>() {
uint32_t bits = 0x3fc90fdbu;
return reinterpret_cast<float const &>(bits);
}
/// Returns pi / 2 (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> half_pi< complex<float> >() {
return complex<float>(half_pi<float>(), float());
}
/// Returns sqrt(pi) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_pi<float>() {
uint32_t bits = 0x3fe2dfc5u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_pi< complex<float> >() {
return complex<float>(root_pi<float>(), float());
}
/// Returns sqrt(pi / 2) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_half_pi<float>() {
uint32_t bits = 0x3fa06c99u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_half_pi< complex<float> >() {
return complex<float>(root_half_pi<float>(), float());
}
/// Returns sqrt(2 * pi) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_two_pi<float>() {
uint32_t bits = 0x40206c99u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_two_pi< complex<float> >() {
return complex<float>(root_two_pi<float>(), float());
}
/// Returns sqrt(ln(4)) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_ln_four<float>() {
uint32_t bits = 0x3f96b55fu;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_ln_four< complex<float> >() {
return complex<float>(root_ln_four<float>(), float());
}
/// Returns e, approximately 2.718... (specialization for float)
template <> CUTLASS_HOST_DEVICE float e<float>() {
uint32_t bits = 0x402df854u;
return reinterpret_cast<float const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> e< complex<float> >() {
return complex<float>(e<float>(), float());
}
/// Returns (1/2) (specialization for float)
template <> CUTLASS_HOST_DEVICE float half<float>() {
uint32_t bits = 0x3f000000u;
return reinterpret_cast<float const &>(bits);
}
/// Returns (1/2) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> half< complex<float> >() {
return complex<float>(half<float>(), float());
}
/// Returns sqrt(2), approximately 1.414... (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_two<float>() {
uint32_t bits = 0x3fb504f3u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_two< complex<float> >() {
return complex<float>(root_two<float>(), float());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for float)
template <> CUTLASS_HOST_DEVICE float half_root_two<float>() {
uint32_t bits = 0x3f3504f3u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> half_root_two< complex<float> >() {
return complex<float>(half_root_two<float>(), float());
}
/// Returns ln(2), approximately 0.693... (specialization for float)
template <> CUTLASS_HOST_DEVICE float ln_two<float>() {
uint32_t bits = 0x3f317218u;
return reinterpret_cast<float const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> ln_two< complex<float> >() {
return complex<float>(ln_two<float>(), float());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for float)
template <> CUTLASS_HOST_DEVICE float ln_ln_two<float>() {
uint32_t bits = 0xbebba795u;
return reinterpret_cast<float const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> ln_ln_two< complex<float> >() {
return complex<float>(ln_ln_two<float>(), float());
}
/// Returns 1/3, approximately 0.333... (specialization for float)
template <> CUTLASS_HOST_DEVICE float third<float>() {
uint32_t bits = 0x3eaaaaabu;
return reinterpret_cast<float const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> third< complex<float> >() {
return complex<float>(third<float>(), float());
}
/// Returns 2/3, approximately 0.666... (specialization for float)
template <> CUTLASS_HOST_DEVICE float twothirds<float>() {
uint32_t bits = 0x3f2aaaabu;
return reinterpret_cast<float const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> twothirds< complex<float> >() {
return complex<float>(twothirds<float>(), float());
}
/// Returns pi - 3, approximately 0.1416... (specialization for float)
template <> CUTLASS_HOST_DEVICE float pi_minus_three<float>() {
uint32_t bits = 0x3e10fdaau;
return reinterpret_cast<float const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> pi_minus_three< complex<float> >() {
return complex<float>(pi_minus_three<float>(), float());
}
/// Returns 4 - pi, approximately 0.858... (specialization for float)
template <> CUTLASS_HOST_DEVICE float four_minus_pi<float>() {
uint32_t bits = 0x3f5bc095u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> four_minus_pi< complex<float> >() {
return complex<float>(four_minus_pi<float>(), float());
}
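// The specializations above (and below) return each constant by reinterpreting a
// fixed IEEE-754 bit pattern, so exactly the same encoding is produced on host and
// device. A minimal host-side sanity check of this idiom (hypothetical, not part of
// this header; assumes <cstring> and <cassert>) might look like:
//
//   float v;
//   uint32_t bits = 0x3f000000u;         // the encoding used by half<float>() above
//   std::memcpy(&v, &bits, sizeof(v));   // well-defined way to reinterpret the bits
//   assert(v == 0.5f);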
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for tfloat32_t
/// Returns 1, the multiplicative identity element (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t one<tfloat32_t>() {
uint32_t bits = 0x3f801000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> one< complex<tfloat32_t> >() {
return complex<tfloat32_t>(one<tfloat32_t>(), tfloat32_t());
}
/// Returns 0, the additive identity element (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t zero<tfloat32_t>() {
uint32_t bits = 0x1000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> zero< complex<tfloat32_t> >() {
return complex<tfloat32_t>(zero<tfloat32_t>(), tfloat32_t());
}
/// Returns 2 (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t two<tfloat32_t>() {
uint32_t bits = 0x40001000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 2 (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(two<tfloat32_t>(), tfloat32_t());
}
/// Returns pi, approximately 3.141 (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t pi<tfloat32_t>() {
uint32_t bits = 0x40491fdbu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(pi<tfloat32_t>(), tfloat32_t());
}
/// Returns 2 * pi (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t two_pi<tfloat32_t>() {
uint32_t bits = 0x40c91fdbu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 2 * pi (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> two_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(two_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns pi / 2 (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t half_pi<tfloat32_t>() {
uint32_t bits = 0x3fc91fdbu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns pi / 2 (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> half_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(half_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(pi) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_pi<tfloat32_t>() {
uint32_t bits = 0x3fe2efc5u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(pi / 2) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_half_pi<tfloat32_t>() {
uint32_t bits = 0x3fa07c99u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_half_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_half_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(2 * pi) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_two_pi<tfloat32_t>() {
uint32_t bits = 0x40207c99u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_two_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_two_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(ln(4)) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_ln_four<tfloat32_t>() {
uint32_t bits = 0x3f96c55fu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_ln_four< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_ln_four<tfloat32_t>(), tfloat32_t());
}
/// Returns e, approximately 2.718... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t e<tfloat32_t>() {
uint32_t bits = 0x402e0854u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> e< complex<tfloat32_t> >() {
return complex<tfloat32_t>(e<tfloat32_t>(), tfloat32_t());
}
/// Returns (1/2) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t half<tfloat32_t>() {
uint32_t bits = 0x3f001000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns (1/2) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> half< complex<tfloat32_t> >() {
return complex<tfloat32_t>(half<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(2), approximately 1.414... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_two<tfloat32_t>() {
uint32_t bits = 0x3fb514f3u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_two<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t half_root_two<tfloat32_t>() {
uint32_t bits = 0x3f3514f3u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> half_root_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(half_root_two<tfloat32_t>(), tfloat32_t());
}
/// Returns ln(2), approximately 0.693... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t ln_two<tfloat32_t>() {
uint32_t bits = 0x3f318218u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> ln_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(ln_two<tfloat32_t>(), tfloat32_t());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t ln_ln_two<tfloat32_t>() {
uint32_t bits = 0xbebbb795u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> ln_ln_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(ln_ln_two<tfloat32_t>(), tfloat32_t());
}
/// Returns 1/3, approximately 0.333... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t third<tfloat32_t>() {
uint32_t bits = 0x3eaabaabu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> third< complex<tfloat32_t> >() {
return complex<tfloat32_t>(third<tfloat32_t>(), tfloat32_t());
}
/// Returns 2/3, approximately 0.666... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t twothirds<tfloat32_t>() {
uint32_t bits = 0x3f2abaabu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> twothirds< complex<tfloat32_t> >() {
return complex<tfloat32_t>(twothirds<tfloat32_t>(), tfloat32_t());
}
/// Returns pi - 3, approximately 0.1416... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t pi_minus_three<tfloat32_t>() {
uint32_t bits = 0x3e110daau;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> pi_minus_three< complex<tfloat32_t> >() {
return complex<tfloat32_t>(pi_minus_three<tfloat32_t>(), tfloat32_t());
}
/// Returns 4 - pi, approximately 0.858... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t four_minus_pi<tfloat32_t>() {
uint32_t bits = 0x3f5bd095u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> four_minus_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(four_minus_pi<tfloat32_t>(), tfloat32_t());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for half_t
/// Returns 1, the multiplicative identity element (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t one<half_t>() {
uint16_t bits = 0x3c00u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> one< complex<half_t> >() {
return complex<half_t>(one<half_t>(), half_t());
}
/// Returns 0, the additive identity element (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t zero<half_t>() {
uint16_t bits = 0x0u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> zero< complex<half_t> >() {
return complex<half_t>(zero<half_t>(), half_t());
}
/// Returns 2 (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t two<half_t>() {
uint16_t bits = 0x4000u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 2 (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> two< complex<half_t> >() {
return complex<half_t>(two<half_t>(), half_t());
}
/// Returns pi, approximately 3.141 (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t pi<half_t>() {
uint16_t bits = 0x4248u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> pi< complex<half_t> >() {
return complex<half_t>(pi<half_t>(), half_t());
}
/// Returns 2 * pi (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t two_pi<half_t>() {
uint16_t bits = 0x4648u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 2 * pi (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> two_pi< complex<half_t> >() {
return complex<half_t>(two_pi<half_t>(), half_t());
}
/// Returns pi / 2 (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t half_pi<half_t>() {
uint16_t bits = 0x3e48u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns pi / 2 (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> half_pi< complex<half_t> >() {
return complex<half_t>(half_pi<half_t>(), half_t());
}
/// Returns sqrt(pi) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_pi<half_t>() {
uint16_t bits = 0x3f17u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_pi< complex<half_t> >() {
return complex<half_t>(root_pi<half_t>(), half_t());
}
/// Returns sqrt(pi / 2) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_half_pi<half_t>() {
uint16_t bits = 0x3d03u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_half_pi< complex<half_t> >() {
return complex<half_t>(root_half_pi<half_t>(), half_t());
}
/// Returns sqrt(2 * pi) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_two_pi<half_t>() {
uint16_t bits = 0x4103u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_two_pi< complex<half_t> >() {
return complex<half_t>(root_two_pi<half_t>(), half_t());
}
/// Returns sqrt(ln(4)) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_ln_four<half_t>() {
uint16_t bits = 0x3cb6u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_ln_four< complex<half_t> >() {
return complex<half_t>(root_ln_four<half_t>(), half_t());
}
/// Returns e, approximately 2.718... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t e<half_t>() {
uint16_t bits = 0x4170u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> e< complex<half_t> >() {
return complex<half_t>(e<half_t>(), half_t());
}
/// Returns (1/2) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t half<half_t>() {
uint16_t bits = 0x3800u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns (1/2) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> half< complex<half_t> >() {
return complex<half_t>(half<half_t>(), half_t());
}
/// Returns sqrt(2), approximately 1.414... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_two<half_t>() {
uint16_t bits = 0x3da8u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_two< complex<half_t> >() {
return complex<half_t>(root_two<half_t>(), half_t());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t half_root_two<half_t>() {
uint16_t bits = 0x39a8u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> half_root_two< complex<half_t> >() {
return complex<half_t>(half_root_two<half_t>(), half_t());
}
/// Returns ln(2), approximately 0.693... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t ln_two<half_t>() {
uint16_t bits = 0x398cu;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> ln_two< complex<half_t> >() {
return complex<half_t>(ln_two<half_t>(), half_t());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t ln_ln_two<half_t>() {
uint16_t bits = 0xb5ddu;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> ln_ln_two< complex<half_t> >() {
return complex<half_t>(ln_ln_two<half_t>(), half_t());
}
/// Returns 1/3, approximately 0.333... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t third<half_t>() {
uint16_t bits = 0x3555u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> third< complex<half_t> >() {
return complex<half_t>(third<half_t>(), half_t());
}
/// Returns 2/3, approximately 0.666... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t twothirds<half_t>() {
uint16_t bits = 0x3955u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> twothirds< complex<half_t> >() {
return complex<half_t>(twothirds<half_t>(), half_t());
}
/// Returns pi - 3, approximately 0.1416... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t pi_minus_three<half_t>() {
uint16_t bits = 0x3088u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> pi_minus_three< complex<half_t> >() {
return complex<half_t>(pi_minus_three<half_t>(), half_t());
}
/// Returns 4 - pi, approximately 0.858... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t four_minus_pi<half_t>() {
uint16_t bits = 0x3adeu;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> four_minus_pi< complex<half_t> >() {
return complex<half_t>(four_minus_pi<half_t>(), half_t());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for bfloat16_t
/// Returns 1, the multiplicative identity element (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t one<bfloat16_t>() {
uint16_t bits = 0x3f80u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> one< complex<bfloat16_t> >() {
return complex<bfloat16_t>(one<bfloat16_t>(), bfloat16_t());
}
/// Returns 0, the additive identity element (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t zero<bfloat16_t>() {
uint16_t bits = 0x0u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> zero< complex<bfloat16_t> >() {
return complex<bfloat16_t>(zero<bfloat16_t>(), bfloat16_t());
}
/// Returns 2 (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t two<bfloat16_t>() {
uint16_t bits = 0x4000u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 2 (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(two<bfloat16_t>(), bfloat16_t());
}
/// Returns pi, approximately 3.141 (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t pi<bfloat16_t>() {
uint16_t bits = 0x4049u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(pi<bfloat16_t>(), bfloat16_t());
}
/// Returns 2 * pi (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t two_pi<bfloat16_t>() {
uint16_t bits = 0x40c9u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 2 * pi (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> two_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(two_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns pi / 2 (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t half_pi<bfloat16_t>() {
uint16_t bits = 0x3fc9u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns pi / 2 (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> half_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(half_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(pi) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_pi<bfloat16_t>() {
uint16_t bits = 0x3fe3u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(pi / 2) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_half_pi<bfloat16_t>() {
uint16_t bits = 0x3fa0u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_half_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_half_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(2 * pi) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_two_pi<bfloat16_t>() {
uint16_t bits = 0x4020u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_two_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_two_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(ln(4)) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_ln_four<bfloat16_t>() {
uint16_t bits = 0x3f97u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_ln_four< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_ln_four<bfloat16_t>(), bfloat16_t());
}
/// Returns e, approximately 2.718... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t e<bfloat16_t>() {
uint16_t bits = 0x402eu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> e< complex<bfloat16_t> >() {
return complex<bfloat16_t>(e<bfloat16_t>(), bfloat16_t());
}
/// Returns (1/2) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t half<bfloat16_t>() {
uint16_t bits = 0x3f00u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns (1/2) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> half< complex<bfloat16_t> >() {
return complex<bfloat16_t>(half<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(2), approximately 1.414... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_two<bfloat16_t>() {
uint16_t bits = 0x3fb5u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_two<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t half_root_two<bfloat16_t>() {
uint16_t bits = 0x3f35u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> half_root_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(half_root_two<bfloat16_t>(), bfloat16_t());
}
/// Returns ln(2), approximately 0.693... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t ln_two<bfloat16_t>() {
uint16_t bits = 0x3f31u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> ln_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(ln_two<bfloat16_t>(), bfloat16_t());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t ln_ln_two<bfloat16_t>() {
uint16_t bits = 0xbebcu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> ln_ln_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(ln_ln_two<bfloat16_t>(), bfloat16_t());
}
/// Returns 1/3, approximately 0.333... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t third<bfloat16_t>() {
uint16_t bits = 0x3eabu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> third< complex<bfloat16_t> >() {
return complex<bfloat16_t>(third<bfloat16_t>(), bfloat16_t());
}
/// Returns 2/3, approximately 0.666... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t twothirds<bfloat16_t>() {
uint16_t bits = 0x3f2bu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> twothirds< complex<bfloat16_t> >() {
return complex<bfloat16_t>(twothirds<bfloat16_t>(), bfloat16_t());
}
/// Returns pi - 3, approximately 0.1416... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t pi_minus_three<bfloat16_t>() {
uint16_t bits = 0x3e11u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> pi_minus_three< complex<bfloat16_t> >() {
return complex<bfloat16_t>(pi_minus_three<bfloat16_t>(), bfloat16_t());
}
/// Returns 4 - pi, approximately 0.858... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t four_minus_pi<bfloat16_t>() {
uint16_t bits = 0x3f5cu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> four_minus_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(four_minus_pi<bfloat16_t>(), bfloat16_t());
}
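// Illustrative usage of the constants defined above (a minimal sketch; the variables
// shown are hypothetical and not part of this header):
//
//   float quarter_turn = cutlass::constants::pi<float>() * cutlass::constants::half<float>();  // pi / 2
//   cutlass::complex<cutlass::half_t> unit =
//       cutlass::constants::one< cutlass::complex<cutlass::half_t> >();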
///////////////////////////////////////////////////////////////////////////////////
} // namespace constants
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/constants.h/0
|
{
"file_path": "cutlass/include/cutlass/constants.h",
"repo_id": "cutlass",
"token_count": 17582
}
| 24 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kStrided,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultDeconv2d;
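// Illustrative instantiation of the primary template (a sketch only; the tile shapes,
// stage count, epilogue, and swizzle below are assumptions chosen for the example and
// are not mandated by this header):
//
//   using Deconv2dKernel = cutlass::conv::kernel::DefaultDeconv2d<
//     float, cutlass::layout::TensorNHWC,                                   // ElementA, LayoutA
//     float, cutlass::layout::TensorNHWC,                                   // ElementB, LayoutB
//     float, cutlass::layout::TensorNHWC,                                   // ElementC, LayoutC
//     float,                                                                // ElementAccumulator
//     cutlass::arch::OpClassSimt,
//     cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<128, 128, 8>,                                // ThreadblockShape
//     cutlass::gemm::GemmShape<32, 64, 8>,                                  // WarpShape
//     cutlass::gemm::GemmShape<1, 1, 1>,                                    // InstructionShape (SIMT)
//     cutlass::epilogue::thread::LinearCombination<float, 1, float, float>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     3,                                                                    // Stages
//     cutlass::arch::OpMultiplyAdd
//   >::Kernel;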
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d specialization for Analytic IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kUnity
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
conv::GroupMode::kNone,
true /*IsDeconv*/
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
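// The specialization above serves unit-stride dgrad: it composes DefaultEpilogueSimt with
// ImplicitGemmConvolution. The kStrided specialization that follows instead composes
// DefaultEpilogueSimtStridedDgrad with ImplicitGemmConvolutionStridedDgrad, which is the
// variant intended for non-unit convolution strides.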
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
conv::GroupMode::kNone,
true /*IsDeconv*/
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
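// Note: the kAnalytic specializations above recompute tensor coordinates from the problem
// size as the iterators advance, whereas the kOptimized specializations below use the
// *Optimized tile access iterators, which rely on precomputed parameters (strides,
// increments, and masks); both are expected to produce the same tile mapping.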
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d specialization for Optimized IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kUnity
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
true /*IsDeconv*/
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
true /*IsDeconv*/
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d specialization for Analytic IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kUnity
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
conv::GroupMode::kNone,
true /*IsDeconv*/
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
conv::GroupMode::kNone,
true /*IsDeconv*/
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d specialization for Optimized IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kUnity
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
true /*IsDeconv*/
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
true /*IsDeconv*/
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/conv/kernel/default_deconv2d.h/0
|
{
"file_path": "cutlass/include/cutlass/conv/kernel/default_deconv2d.h",
"repo_id": "cutlass",
"token_count": 9688
}
| 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing the loading of convolution tiles, mapped to the GEMM A matrix
(the output gradient tile), from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradOutputGradientTileAccessIteratorOptimized;
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradOutputGradientTileAccessIteratorOptimized strided dgrad needs special handling
// to skip MMAs (Dx = Dy * w) on invalid filter positions
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradOutputGradientTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
using Mask = uint64_t;
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or greater.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dStridedDgradOutputGradientIteratorOptimizedParams;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
int filter_k_;
int filter_r_;
int filter_s_;
int start_r_;
int start_s_;
int64_t reset_bytes_s_;
int64_t reset_bytes_r_;
Index masks_[ThreadMap::Iterations::kStrided][kAccessesPerVector][2];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord() // threadblock offset - units are whole CTA tiles
):
params_(params),
problem_size_(problem_size),
filter_k_(0),
filter_r_(start_r),
filter_s_(start_s),
start_r_(start_r),
start_s_(start_s) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
reset_bytes_s_ = (problem_size_.num_gemm_k_filter_s(start_s_) - 1) * params_.inc_next[0];
reset_bytes_r_ = (problem_size_.num_gemm_k_filter_s(start_s_) - 1) * params_.inc_next[0] +
(problem_size_.num_gemm_k_filter_r(start_r_) - 1) * params_.inc_next[1];
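// reset_bytes_s_ rewinds the pointer bytes accumulated while stepping through the S filter positions of
// one filter row; reset_bytes_r_ additionally rewinds the bytes accumulated over the R filter positions.
// advance() subtracts the appropriate value when filter_s_ (and possibly filter_r_) wraps back to its start.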
int offset_n[ThreadMap::Iterations::kStrided];
int offset_p[ThreadMap::Iterations::kStrided];
int offset_q[ThreadMap::Iterations::kStrided];
int filter_r = filter_r_;
int filter_s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
filter_r = (problem_size_.R - 1 - filter_r);
filter_s = (problem_size_.S - 1 - filter_s);
}
// Starting h, w positions for filter position in gemm_k=0
int start_h, start_w;
strided_dgrad_starting_coords(
problem_size_,
stride_h_divmod, stride_w_divmod,
filter_r, filter_s,
start_h, start_w);
// Effective starting P and Q for filter position required for remapping NHW rows
int P = (problem_size_.H - start_h + problem_size_.stride_h - 1) / problem_size_.stride_h;
int Q = (problem_size_.W - start_w + problem_size_.stride_w - 1) / problem_size_.stride_w;
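// Example: H = 8, start_h = 1, stride_h = 2 gives P = (8 - 1 + 2 - 1) / 2 = 4, i.e. the ceiling of
// (H - start_h) / stride_h output rows reachable from this starting offset (and likewise for Q).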
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_npq = (threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided) % params_.tiled_rows_per_filter;
// (STEP 1) [reorder NHW rows to start with same filter positions]
offset_n[s] = offset_npq / (P * Q);
int residual = offset_npq % (P * Q);
int p = (residual / Q);
int q = (residual % Q);
int mapped_h = (start_h + p * problem_size_.stride_h);
int mapped_w = (start_w + q * problem_size_.stride_w);
// Access (p, q) coordinates for Dy tensor for filter position in gemm_k=0
// note that (h + pad_h - filter_r) and (w + pad_w - filter_s) are guaranteed to be
// divisible by stride_h and stride_w
offset_p[s] = (mapped_h + problem_size_.pad_h - filter_r) / problem_size_.stride_h;
offset_q[s] = (mapped_w + problem_size_.pad_w - filter_s) / problem_size_.stride_w;
// Initialize pointers for gemm_k=0
TensorCoord coord{offset_n[s], offset_p[s], offset_q[s], filter_k_};
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
//
// Precompute mask predicates
//
clear_mask();
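// Each mask packs one predicate bit per filter position: bit r of masks_[i][v][0] is set when filter row r
// yields an in-bounds P coordinate for strided iteration i, and bit s of masks_[i][v][1] does the same for
// the Q coordinate. valid() tests the bits selected by the current (filter_r_, filter_s_).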
CUTLASS_PRAGMA_NO_UNROLL
for (int r = start_r; r < problem_size_.R; r += problem_size_.stride_h) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int p = offset_p[s_idx];
p += (params_.conv_sign * (r / problem_size_.stride_h));
bool pred = (offset_n[s_idx] < problem_size_.N && p >= 0 && p < problem_size_.P);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][0] |= (pred << r);
}
}
}
CUTLASS_PRAGMA_NO_UNROLL
for(int s = start_s; s < problem_size_.S; s += problem_size_.stride_w) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int q = offset_q[s_idx];
q += (params_.conv_sign * (s / problem_size_.stride_w));
bool pred = (q >= 0 && q < problem_size_.Q);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][1] |= (pred << s);
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, (filter_k_ + v_idx * AccessType::kElements) >= problem_size.K);
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn});
}
private:
/// Adds a byte offset (with an optional byte reset) to each access pointer
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset, LongIndex byte_reset = 0) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset - byte_reset;
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void advance() {
int next_idx = 0;
int64_t reset_bytes = 0;
// Move filter_s by stride_w
filter_s_ += problem_size_.stride_w;
if (filter_s_ >= problem_size_.S) {
// Restore filter_s
filter_s_ = start_s_;
// Move filter_r by stride_h
filter_r_ += problem_size_.stride_h;
#if 0
if (filter_r_ < problem_size_.R) {
next_idx = 1;
// Restore bytes in q coordinate (Mma in filter s dimension)
reset_bytes = reset_bytes_s_;
} else {
// Restore filter_r
filter_r_ = start_r_;
next_idx = 2;
// Restore bytes in p and q coordinate (Mma in filter s and r dimension)
reset_bytes = reset_bytes_r_;
}
#else
asm volatile(
"{\n\t"
" .reg .pred %%p;\n\t"
" setp.lt.s32 %%p, %3, %4;\n\t"
" selp.s32 %0, %3, %5, %%p;\n\t"
" selp.s32 %1, 1, 2, %%p;\n\t"
" selp.s64 %2, %6, %7, %%p;\n\t"
"}\n"
: "=r"(filter_r_), "=r"(next_idx), "=l"(reset_bytes)
: "r"(filter_r_), "r"(problem_size_.R), "r"(start_r_),
"l"(reset_bytes_s_), "l"(reset_bytes_r_));
#endif
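// The inline PTX above is a branchless equivalent of the disabled C++ branch: setp compares filter_r_
// against R, and the selp instructions pick the wrapped/unwrapped filter_r_, the next_idx (1 or 2), and
// the matching reset_bytes in a single predicated sequence.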
}
// offset pointers by offset_bytes
add_byte_offset_(params_.inc_next[next_idx] - reset_bytes);
if (next_idx == 2) {
filter_k_ += params_.filter_k_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, (filter_k_ + v_idx * AccessType::kElements) >= problem_size_.K);
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
/// Returns true if the current coordinate is within the output tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
return
(masks_[iteration_strided_][iteration_vector_][0] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][iteration_vector_][1] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
// Limit on filter size
if (problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradOutputGradientTileAccessIteratorOptimized unity-stride specialization is optimized for
// dgrad problems with stride = {1, 1}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradOutputGradientTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kUnity,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
using Mask = uint64_t;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dDgradOutputGradientIteratorOptimizedParams;
private:
Conv2dDgradOutputGradientIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (r, s)
int filter_r_;
int filter_s_;
int filter_k_;
Index masks_[ThreadMap::Iterations::kStrided][kAccessesPerVector][2];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized(
Conv2dDgradOutputGradientIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
filter_k_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_h[ThreadMap::Iterations::kStrided];
int offset_w[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_nhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
// The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
// offset_n[s] = offset_nhw / (problem_size_.H * problem_size_.W);
// int residual = offset_nhw % (problem_size_.H * problem_size_.W);
//
// offset_h[s] = residual / problem_size_.W;
// offset_w[s] = residual % problem_size_.W;
//
int residual;
params_.hw_divmod(offset_n[s], residual, offset_nhw);
params_.w_divmod(offset_h[s], offset_w[s], residual);
TensorCoord coord = at_(offset_n[s], offset_h[s], offset_w[s], 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int p = offset_h[s_idx] + problem_size_.pad_h - r_ * problem_size_.dilation_h;
bool pred = (offset_n[s_idx] < problem_size_.N && p >= 0 && p < problem_size_.P);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][0] |= (pred << r);
}
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int q = offset_w[s_idx] + problem_size_.pad_w - s_ * problem_size_.dilation_w;
bool pred = (q >= 0 && q < problem_size_.Q);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][1] |= (pred << s);
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_k_ + v_idx * AccessType::kElements >= problem_size.K);
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
/// Returns the coordinate in the output gradient tensor Dy corresponding to
/// activation position (n, h, w) and filter position (k, r, s)
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int h, int w, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int p = h + problem_size_.pad_h - r * problem_size_.dilation_h;
int q = w + problem_size_.pad_w - s * problem_size_.dilation_w;
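// e.g. with pad_h = 1, dilation_h = 1, activation row h = 3 and filter row r = 2 map to
// output-gradient row p = 3 + 1 - 2 = 2 (and similarly for q)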
return TensorCoord(n, p, q, filter_k_);
}
/// Adds a byte offset to each access pointer
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
next_idx = 1;
}
else {
filter_r_ = 0;
next_idx = 2;
}
}
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 2) {
filter_k_ += params_.filter_k_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, (filter_k_ + v_idx * AccessType::kElements) >= problem_size_.K);
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][iteration_vector_][0] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][iteration_vector_][1] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// This is specialized for unit stride
if (problem_size.stride() != MatrixCoord({1, 1})) {
return Status::kErrorNotSupported;
}
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorNotSupported;
}
// Limit on filter size
if (problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_optimized.h/0
|
{
"file_path": "cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_optimized.h",
"repo_id": "cutlass",
"token_count": 10228
}
| 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity
>
class Conv3dDgradFilterTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = StrideSupport_;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Parameters structure
//
struct Params : Conv3dDgradFilterIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv3dDgradFilterIteratorOptimizedParams const &base):
Conv3dDgradFilterIteratorOptimizedParams(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
):
Conv3dDgradFilterIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) { }
};
private:
Conv3dDgradFilterIteratorOptimizedParams const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
uint32_t predicates_;
int filter_trs_;
int filter_k_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided *
ThreadMap::Iterations::kContiguous < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv3dDgradFilterTileAccessIteratorOptimized(
Conv3dDgradFilterIteratorOptimizedParams const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_(0),
filter_trs_(0),
filter_k_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.strided();
Index column = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided;
int filter_c = column + c * ThreadMap::Delta::kContiguous;
uint32_t pred = ((filter_k < problem_size_.K && filter_c < problem_size_.C) ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_ |= (pred << pred_idx);
}
}
pointer_ += (
filter_k_ * params.layout.stride()[3] + column
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
LongIndex next = params_.inc_next_trs;
// moves to the next tile
++filter_trs_;
if (filter_trs_ == params_.TRS) {
filter_trs_ = 0;
next = params_.inc_next_k;
filter_k_ += params_.filter_k_delta;
}
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
predicates_ = (predicates_ & (~kClearMask));
}
}
pointer_ += next;
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_ & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dDgradFilterTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_strided;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_optimized.h/0
|
{
"file_path": "cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_optimized.h",
"repo_id": "cutlass",
"token_count": 3194
}
| 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Interface between a CUTLASS device-wide operator and CUDA.
*/
#pragma once
#include <cuda_runtime_api.h>
#include "cutlass/cutlass.h"
#include "cutlass/trace.h"
#include "cutlass/platform/platform.h"
#if ! defined(__CUDACC_RTC__)
#include <cstdio>
#endif
#if ((__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)))
# define CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Macro-level guard for CUDA Host Adapter
//
#if !defined(CUTLASS_ENABLE_CUDA_HOST_ADAPTER)
#define CUTLASS_ENABLE_CUDA_HOST_ADAPTER false
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This class defines an object which abstracts interactions between the CUTLASS device-wide GEMM and
/// CUDA. The intention is to enable CUTLASS to be used with both the CUDA Runtime API and CUDA Driver API.
struct CudaHostAdapter {
/// Limit the number of kernels
static constexpr int32_t kMaximumKernelCount = 4;
/// Maximum cluster size
static constexpr int MaxClusterSize = 32;
//
// Data members
//
/// Handles
void *kernel_handles[kMaximumKernelCount];
int32_t kernel_count = 0;
//
// Methods
//
/// Ctor
CudaHostAdapter() = default;
/// Dtor
virtual ~CudaHostAdapter() {}
/// Copy Ctor
inline CudaHostAdapter(const CudaHostAdapter & rhs):
kernel_count(rhs.kernel_count)
{
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count < kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
}
/// Copy Assignment
inline CudaHostAdapter& operator=(const CudaHostAdapter & rhs) {
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count < kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
kernel_count = rhs.kernel_count;
return *this;
}
/// Move ctor
inline CudaHostAdapter(CudaHostAdapter && rhs):
kernel_count(rhs.kernel_count)
{
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count < kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
}
/// Move assignment
inline CudaHostAdapter& operator=(CudaHostAdapter && rhs) {
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count < kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
kernel_count = rhs.kernel_count;
return *this;
}
/// Ctor
inline CudaHostAdapter(
void **kernel_handles_,
int32_t kernel_count_
):
kernel_count(kernel_count_)
{
CUTLASS_ASSERT(kernel_count >= 0);
for (int32_t i = 0; i < kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = kernel_handles_[i];
}
}
/// Returns true if the CudaHostAdapter is empty (kernel_count == 0)
inline bool empty() const { return !kernel_count; }
/// Returns kernel_count
inline size_t size() const { return static_cast<size_t>(kernel_count); }
/// Queries the occupancy of a kernel
virtual Status query_occupancy(
int32_t *device_sms,
int32_t *sm_occupancy,
int32_t kernel_index,
int32_t thread_count,
int32_t smem_size) const = 0;
/// Launches a kernel without using Threadblock Clusters.
virtual Status launch(
dim3 const grid_dims,
dim3 const block_dims,
size_t const smem_size,
cudaStream_t cuda_stream,
void** kernel_params,
int32_t kernel_index) const = 0;
/// Launches a kernel using the CUDA Extensible Launch API and Threadblock Clusters.
virtual Status launch(
dim3 const grid_dims,
dim3 const cluster_dims,
dim3 const block_dims,
size_t const smem_size,
cudaStream_t cuda_stream,
void** kernel_params,
int32_t kernel_index) const = 0;
protected:
/**
* Fills a buffer in Global Memory with a byte sequence copied from host memory.
* This function can be overridden to dispatch to the appropriate cuMemsetD*Async API
*/
virtual Status memsetDeviceImpl(
void* destination, ///< Device memory pointer to be filled
void const* fill_value, ///< Value to be filled in the buffer
size_t fill_size, ///< Size of the data type to be used for filling the buffer
size_t count, ///< Number of elements of size fill_size
cudaStream_t stream) const = 0;
public:
/// Fills a buffer in Global Memory with a byte sequence copied from host memory
template<class FillValueType>
Status memsetDevice(
void* destination,
FillValueType fill_value,
size_t count,
cudaStream_t stream) const
{
return this->memsetDeviceImpl(
destination,
&fill_value,
sizeof(FillValueType),
count,
stream);
}
};
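//
// Illustrative sketch (not part of CUTLASS): a minimal concrete adapter built on the CUDA Runtime API.
// The kernel handles are assumed to be device function pointers usable with cudaLaunchKernel; the class
// name and error handling are hypothetical and shown only to clarify how the virtual interface is meant
// to be filled in.
//
// struct RuntimeApiHostAdapter : CudaHostAdapter {
//   using CudaHostAdapter::CudaHostAdapter;
//
//   Status query_occupancy(int32_t* device_sms, int32_t* sm_occupancy, int32_t kernel_index,
//                          int32_t thread_count, int32_t smem_size) const override {
//     cudaDeviceGetAttribute(device_sms, cudaDevAttrMultiProcessorCount, /*device=*/0);
//     cudaOccupancyMaxActiveBlocksPerMultiprocessor(sm_occupancy, kernel_handles[kernel_index],
//                                                   thread_count, smem_size);
//     return Status::kSuccess;
//   }
//
//   Status launch(dim3 const grid_dims, dim3 const block_dims, size_t const smem_size,
//                 cudaStream_t cuda_stream, void** kernel_params, int32_t kernel_index) const override {
//     cudaError_t result = cudaLaunchKernel(kernel_handles[kernel_index], grid_dims, block_dims,
//                                           kernel_params, smem_size, cuda_stream);
//     return (result == cudaSuccess) ? Status::kSuccess : Status::kErrorInternal;
//   }
//
//   // The cluster-launch overload and memsetDeviceImpl are omitted for brevity; a concrete adapter
//   // must override them as well before it can be instantiated.
// };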
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/cuda_host_adapter.hpp/0
|
{
"file_path": "cutlass/include/cutlass/cuda_host_adapter.hpp",
"repo_id": "cutlass",
"token_count": 2346
}
| 28 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing elementwise operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/barrier.h"
#include "cutlass/epilogue/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/epilogue/fusion/callbacks.hpp"
#include "cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp"
#include "cutlass/detail/layout.hpp"
#include "cutlass/trace.h"
#include "cute/tensor.hpp"
#include "cutlass/cuda_host_adapter.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int StagesC_,
int StagesD_,
int FragmentSize_,
bool ReuseSmemC_,
bool DelayTmaStore_,
class CtaTileMNK_, // (CTA_M,CTA_N,CTA_K)
class EpilogueTile_, // (EPI_TILE_M,EPI_TILE_N)
class ElementC_,
class StrideC_,
class ElementD_,
class StrideD_,
class FusionCallbacks_,
class CopyOpG2S_,
class SmemLayoutAtomC_,
class CopyOpS2R_,
class CopyOpS2G_,
class SmemLayoutAtomD_,
class CopyOpR2S_
>
class CollectiveEpilogue<
Sm90TmaWarpSpecialized<StagesC_,StagesD_,FragmentSize_,ReuseSmemC_,DelayTmaStore_>,
CtaTileMNK_,
EpilogueTile_,
ElementC_,
StrideC_,
ElementD_,
StrideD_,
FusionCallbacks_,
CopyOpG2S_,
SmemLayoutAtomC_,
CopyOpS2R_,
CopyOpS2G_,
SmemLayoutAtomD_,
CopyOpR2S_
> {
public:
//
// Type Aliases
//
using DispatchPolicy = Sm90TmaWarpSpecialized<StagesC_,StagesD_,FragmentSize_,ReuseSmemC_,DelayTmaStore_>;
using CtaTileMNK = CtaTileMNK_;
using EpilogueTile = EpilogueTile_;
using FusionCallbacks = FusionCallbacks_;
using ElementC = ElementC_;
using StrideC = StrideC_;
using ElementD = ElementD_;
using StrideD = StrideD_;
using CopyOpG2S = CopyOpG2S_;
using SmemLayoutAtomC = SmemLayoutAtomC_;
using CopyOpS2R = CopyOpS2R_;
using CopyOpS2G = CopyOpS2G_;
using SmemLayoutAtomD = SmemLayoutAtomD_;
using CopyOpR2S = CopyOpR2S_;
using ThreadEpilogueOp = typename epilogue::fusion::FusionCallbacksTraits<FusionCallbacks>::Operation;
using GmemTiledCopyC = CopyOpG2S;
using GmemTiledCopyD = CopyOpS2G;
static_assert(!is_layout<EpilogueTile>::value && is_tuple<EpilogueTile>::value, "EpilogueTile must be a cute::Tile or cute::Shape");
static_assert(cute::rank(CtaTileMNK{}) == 3, "CtaTileMNK must be rank-3: [CTA_M, CTA_N, CTA_K]");
static_assert(cute::rank(EpilogueTile{}) == 2, "EpilogueTile must be rank-2: [EPI_TILE_M, EPI_TILE_N]");
static_assert(size<0>(CtaTileMNK{}) % size<0>(shape(EpilogueTile{})) == 0, "EPI_TILE_M must divide CTA_M");
static_assert(size<1>(CtaTileMNK{}) % size<1>(shape(EpilogueTile{})) == 0, "EPI_TILE_N must divide CTA_N");
static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]");
static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]");
private:
constexpr static bool is_source_supported = not cute::is_void_v<ElementC>;
constexpr static bool is_destination_supported = not cute::is_void_v<ElementD>;
using SmemElementD = cute::conditional_t<not is_destination_supported,fusion::get_element_aux_t<FusionCallbacks>, ElementD>;
static_assert(not cute::is_void_v<SmemElementD>, "SmemElementD is void");
using SmemElementC = cute::conditional_t<not is_source_supported,SmemElementD,ElementC>; // prevents void ref breakages
constexpr static int StagesC = StagesC_;
constexpr static int StagesD = StagesD_;
constexpr static bool ReuseSmemC = ReuseSmemC_ and is_destination_supported;
constexpr static bool DelayTmaStore = DelayTmaStore_;
constexpr static bool is_m_major_C = detail::is_m_major<StrideC>();
constexpr static bool is_m_major_D = detail::is_m_major<StrideD>();
constexpr static bool is_im2col_C = cute::is_same_v<CopyOpG2S, SM90_TMA_LOAD_IM2COL>;
constexpr static bool is_im2col_D = cute::is_same_v<CopyOpS2G, SM90_TMA_STORE_IM2COL>;
using SmemLayoutC = decltype(tile_to_shape(
SmemLayoutAtomC{},
make_shape(size<0>(EpilogueTile{}), size<1>(EpilogueTile{}), Int<StagesC>{}),
cute::conditional_t<is_m_major_C, Step<_2,_1,_3>, Step<_1,_2,_3>>{} ));
using SmemLayoutD = decltype(tile_to_shape(
SmemLayoutAtomD{},
make_shape(size<0>(EpilogueTile{}), size<1>(EpilogueTile{}), Int<ReuseSmemC ? StagesC : StagesD>{}),
cute::conditional_t<is_m_major_D, Step<_2,_1,_3>, Step<_1,_2,_3>>{} ));
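// Both staging layouts stack the pipeline stages of the swizzled epilogue-tile atom along the third mode;
// the Step ordering keeps each stage M-major or N-major to match the corresponding gmem stride.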
constexpr static bool support_smem_reuse = is_source_supported && is_destination_supported && StagesD <= StagesC
&& cosize(take<0,2>(SmemLayoutC{})) == cosize(take<0,2>(SmemLayoutD{}));
static_assert(not (ReuseSmemC && not support_smem_reuse), "Smem reuse requirements not met");
constexpr static size_t SmemAlignmentD = cutlass::detail::alignment_for_swizzle(SmemLayoutD{});
constexpr static size_t SmemAlignmentC = cutlass::detail::alignment_for_swizzle(SmemLayoutC{});
using EmptyType = cute::tuple<>;
using SmemCStorage = cute::conditional_t<is_source_supported and (not ReuseSmemC),
array_aligned<SmemElementC, size(SmemLayoutC{}), SmemAlignmentC>,
EmptyType>;
using SmemDStorage = cute::conditional_t<is_destination_supported,
array_aligned<SmemElementD, size(SmemLayoutD{}), SmemAlignmentD>,
EmptyType>;
struct TensorStorageImpl: cute::tuple<SmemCStorage, SmemDStorage> {
using Base = cute::tuple<SmemCStorage, SmemDStorage>;
constexpr decltype(auto)
smem_C() {
return cute::get<0>(static_cast<Base &>(*this));
}
constexpr decltype(auto)
smem_D() {
return cute::get<1>(static_cast<Base &>(*this));
}
using FusionStorage = typename FusionCallbacks::SharedStorage;
FusionStorage thread;
};
public:
// TMA pipeline for loading C
using LoadPipeline = cutlass::PipelineTransactionAsync<StagesC>;
using LoadPipelineState = cutlass::PipelineState<StagesC>;
constexpr static uint32_t TmaTransactionBytes =
(size(take<0,2>(SmemLayoutC{})) * static_cast<uint32_t>(sizeof_bits<SmemElementC>::value)) / 8;
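// e.g. a 64x32 epilogue tile of half_t gives (64 * 32 * 16) / 8 = 4096 transaction bytes per stage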
// TMA pipeline for storing D
using StorePipeline = cute::conditional_t<ReuseSmemC,
cutlass::PipelineTmaStore<StagesC, StagesD-1>,
cutlass::PipelineTmaStore<StagesD>>;
using StorePipelineState = cutlass::PipelineState<ReuseSmemC ? StagesC : StagesD>;
struct SharedStorage {
using TensorStorage = TensorStorageImpl;
TensorStorage tensors;
using PipelineStorage = typename LoadPipeline::SharedStorage;
PipelineStorage pipeline;
};
using TensorStorage = typename SharedStorage::TensorStorage;
using PipelineStorage = typename SharedStorage::PipelineStorage;
// Host side epilogue arguments
struct Arguments {
typename FusionCallbacks::Arguments thread{};
ElementC const* ptr_C;
StrideC dC;
ElementD const* ptr_D;
StrideD dD;
};
// Device side epilogue params
struct Params {
using TMA_C = decltype(make_tma_copy(
CopyOpG2S{},
make_tensor(make_gmem_ptr(static_cast<SmemElementC const*>(nullptr)),
repeat_like(StrideC{}, int32_t(0)), StrideC{}),
take<0,2>(SmemLayoutC{}),
EpilogueTile{},
_1{}));
using TMA_D = decltype(make_tma_copy(
CopyOpS2G{},
make_tensor(make_gmem_ptr(static_cast<SmemElementD const*>(nullptr)),
repeat_like(StrideD{}, int32_t(0)), StrideD{}),
take<0,2>(SmemLayoutD{}),
EpilogueTile{},
_1{}));
typename FusionCallbacks::Params thread{};
TMA_C tma_load_c;
TMA_D tma_store_d;
};
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
ProblemShape const& problem_shape,
Arguments const& args,
[[maybe_unused]] void* workspace) {
// Optionally append 1s until problem shape is rank-4, in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M, N, K, L] = problem_shape_MNKL;
// For fprop/dgrad kernels, problem shape M is multimodal and must be linearized when using tiled (non-im2col) TMA
auto M_C = conditional_return<is_im2col_C>(M, size(M));
auto M_D = conditional_return<is_im2col_D>(M, size(M));
typename Params::TMA_C tma_load_c = {};
if constexpr (is_source_supported) {
Tensor tensor_c = make_tensor(make_gmem_ptr(args.ptr_C), make_layout(make_shape(M_C,N,L), args.dC));
tma_load_c = make_tma_copy(CopyOpG2S{}, tensor_c, take<0,2>(SmemLayoutC{}), EpilogueTile{}, _1{});
}
typename Params::TMA_D tma_store_d;
if constexpr (is_destination_supported) {
Tensor tensor_d = make_tensor(make_gmem_ptr(args.ptr_D), make_layout(make_shape(M_D,N,L), args.dD));
tma_store_d = make_tma_copy(CopyOpS2G{}, tensor_d, take<0,2>(SmemLayoutD{}), EpilogueTile{}, _1{});
}
return {
FusionCallbacks::to_underlying_arguments(problem_shape, args.thread, workspace),
tma_load_c,
tma_store_d
};
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return FusionCallbacks::get_workspace_size(problem_shape, args.thread);
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return FusionCallbacks::initialize_workspace(problem_shape, args.thread, workspace, stream, cuda_adapter);
}
template <class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
constexpr int tma_alignment_bits = 128;
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
if constexpr (is_destination_supported) {
constexpr int min_tma_aligned_elements_D = tma_alignment_bits / cutlass::sizeof_bits<ElementD>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_D>(cute::make_shape(M,N,L), StrideD{});
}
if constexpr (not cute::is_void_v<ElementC>) {
constexpr int min_tma_aligned_elements_C = tma_alignment_bits / cutlass::sizeof_bits<ElementC>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_C>(cute::make_shape(M,N,L), StrideC{});
}
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
}
return implementable;
}
template<class TileShapeMNK>
CUTLASS_HOST_DEVICE
static constexpr int
get_load_pipe_increment(TileShapeMNK tile_shape_MNK) {
// Compute number of epilogue subtiles
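// e.g. a 128x128 CTA tile with a 64x32 EpilogueTile yields (128/64) * (128/32) = 8 subtiles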
return size<1>(zipped_divide(make_layout(take<0,2>(tile_shape_MNK)), EpilogueTile{}));
}
template<class TileShapeMNK>
CUTLASS_HOST_DEVICE
static constexpr int
get_store_pipe_increment(TileShapeMNK tile_shape_MNK) {
return get_load_pipe_increment(tile_shape_MNK);
}
/// Issue Tma Descriptor Prefetch -- ideally from a single thread for best performance
CUTLASS_DEVICE
static void
prefetch_tma_descriptors(Params const& epilogue_params) {
if constexpr (is_source_supported) {
cute::prefetch_tma_descriptor(epilogue_params.tma_load_c.get_tma_descriptor());
}
if constexpr (is_destination_supported) {
cute::prefetch_tma_descriptor(epilogue_params.tma_store_d.get_tma_descriptor());
}
}
CUTLASS_HOST_DEVICE
CollectiveEpilogue(Params const& params_, TensorStorage& shared_tensors)
: params(params_), fusion_callbacks(params_.thread, shared_tensors.thread) {}
CUTLASS_DEVICE
bool
is_producer_load_needed() const {
return fusion_callbacks.is_producer_load_needed();
}
template<
class ProblemShapeMNKL,
class TileShapeMNK,
class TileCoordMNKL,
class TiledMma
>
CUTLASS_DEVICE auto
load(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_producer_state,
ProblemShapeMNKL problem_shape_mnkl,
TileShapeMNK tile_shape_MNK,
TileCoordMNKL tile_coord_mnkl,
TiledMma tiled_mma,
int thread_idx,
TensorStorage& shared_tensors,
int subtile_idx=-1) {
using namespace cute;
// Indexing variables
auto [M, N, K, L] = problem_shape_mnkl;
auto [m_coord, n_coord, k_coord, l_coord] = tile_coord_mnkl;
// The TMA tensor C under im2col mode only has two modes (M, N), so it
// should be locally tiled with only (m_coord, n_coord).
auto coord_shape = conditional_return<is_im2col_C>(
make_coord(m_coord, n_coord),
make_coord(m_coord, n_coord, l_coord));
// Tile residue
auto residue_mn = make_coord(M,N);
// Represent the full source tensor, slice to get the tile this CTA is currently responsible for
Tensor mC_mn = params.tma_load_c.get_tma_tensor(make_shape(M,N,L)); // (M,N,L)
Tensor mC = coalesce(mC_mn, take<0,2>(CtaTileMNK{}));
Tensor gC = local_tile(mC, take<0,2>(CtaTileMNK{}), coord_shape); // (CTA_M,CTA_N)
// Apply epilogue subtile, get matching smem tensor
SmemElementC* ptr_sC = nullptr;
if constexpr (is_source_supported) {
if constexpr (ReuseSmemC) {
ptr_sC = reinterpret_cast<SmemElementC*>(shared_tensors.smem_D().data());
} else {
ptr_sC = shared_tensors.smem_C().data();
}
}
Tensor gC_epi = flat_divide(gC, EpilogueTile{}); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N)
Tensor sC_epi = make_tensor(make_smem_ptr(ptr_sC), SmemLayoutC{}); // (EPI_TILE_M,EPI_TILE_N,PIPE_C)
// Prepare the thread(b)lock's (G)mem to (S)mem TMA tiled copy (bGS_)
ThrCopy thrblk_g2s = params.tma_load_c.get_slice(Int<0>{});
Tensor bGS_gC = thrblk_g2s.partition_S(gC_epi); // (G2S,G2S_M,G2S_N,EPI_M,EPI_N)
Tensor bGS_sC = thrblk_g2s.partition_D(sC_epi); // (G2S,G2S_M,G2S_N,PIPE_C)
// Get the fusion callbacks for the producer load warp
auto pld_args = cutlass::epilogue::fusion::detail::ProducerLoadArgs{
problem_shape_mnkl,
CtaTileMNK{},
tile_coord_mnkl,
residue_mn,
EpilogueTile{},
thread_idx
};
auto pld_callbacks = fusion_callbacks.get_producer_load_callbacks(pld_args);
bool is_C_load_needed = is_source_supported && fusion_callbacks.is_C_load_needed();
// Predication for TMA load (one thread issues TMA load)
bool issue_tma_load = cute::elect_one_sync();
// Acquire the lock for the first stage
uint64_t* tma_barrier = load_pipeline.producer_get_barrier(load_pipe_producer_state);
load_pipeline.producer_acquire(load_pipe_producer_state);
// Pre-loop fusion callback entry point
pld_callbacks.begin(tma_barrier, load_pipe_producer_state.count(), issue_tma_load);
CUTLASS_PRAGMA_UNROLL
for (int epi_n = 0; epi_n < size<3>(gC_epi); ++epi_n) {
CUTLASS_PRAGMA_UNROLL
for (int epi_m = 0; epi_m < size<2>(gC_epi); ++epi_m) {
if (subtile_idx != -1 && (epi_n * static_cast<int>(size<2>(gC_epi)) + epi_m) != subtile_idx) {
continue;
}
// Acquire the lock for this stage
constexpr uint16_t mcast_mask = 0;
uint64_t* tma_barrier = load_pipeline.producer_get_barrier(load_pipe_producer_state);
load_pipeline.producer_acquire(load_pipe_producer_state);
// Loop fusion callback entry point
pld_callbacks.step(tma_barrier, epi_m, epi_n, load_pipe_producer_state.count(), issue_tma_load);
// Execute the TMA load for C if needed
if (issue_tma_load && is_C_load_needed) {
copy(params.tma_load_c.with(*tma_barrier, mcast_mask),
bGS_gC(_,_,_,epi_m,epi_n), bGS_sC(_,_,_,load_pipe_producer_state.index()));
load_pipeline.producer_expect_transaction(load_pipe_producer_state);
}
// Commit TMA loads for this stage and release the lock
load_pipeline.producer_commit(load_pipe_producer_state);
++load_pipe_producer_state;
}
}
// Post-loop fusion callback entry point
pld_callbacks.end();
return load_pipe_producer_state;
}
CUTLASS_DEVICE auto
load_tail(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_producer_state) {
bool issue_tma_load = cute::elect_one_sync();
if (issue_tma_load) {
load_pipeline.producer_tail(load_pipe_producer_state);
}
return load_pipe_producer_state;
}
template<
class ProblemShapeMNKL,
class TileShapeMNK,
class TileCoordMNKL,
class AccEngine, class AccLayout,
class TiledMma
>
CUTLASS_DEVICE auto
store(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_consumer_state,
StorePipeline store_pipeline,
StorePipelineState store_pipe_producer_state,
ProblemShapeMNKL problem_shape_mnkl,
TileShapeMNK tile_shape_MNK,
TileCoordMNKL tile_coord_mnkl,
cute::Tensor<AccEngine,AccLayout> accumulators,
TiledMma tiled_mma,
int thread_idx,
TensorStorage& shared_tensors,
int subtile_idx=-1) {
using namespace cute;
using ElementAccumulator = typename AccEngine::value_type;
using ElementCompute_ = typename epilogue::fusion::FusionCallbacksTraits<FusionCallbacks>::ElementCompute;
using ElementCompute = cute::conditional_t<cute::is_void_v<ElementCompute_>,ElementAccumulator,ElementCompute_>;
static_assert(is_rmem<AccEngine>::value, "Accumulator must be RF resident.");
static_assert(rank(AccLayout{}) == 3, "Accumulator must be MMA-partitioned: (MMA,MMA_M,MMA_N)");
static_assert(rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4");
static_assert(is_static<TileShapeMNK>::value, "TileShapeMNK must be static");
static_assert(rank(TileShapeMNK{}) == 3, "TileShapeMNK must be rank 3");
static_assert(rank(TileCoordMNKL{}) == 4, "TileCoordMNKL must be rank 4");
// Indexing variables
auto [M, N, K, L] = problem_shape_mnkl;
auto [m_coord, n_coord, k_coord, l_coord] = tile_coord_mnkl;
auto mma_tile_m = tile_size<0>(tiled_mma);
auto mma_tile_n = tile_size<1>(tiled_mma);
auto epi_tile_m = size<0>(EpilogueTile{});
auto epi_tile_n = size<1>(EpilogueTile{});
// The TMA tensor D under im2col mode only has two modes (M, N), so it
// should be locally tiled with only (m_coord, n_coord).
auto coord_shape = conditional_return<is_im2col_D>(
make_coord(m_coord, n_coord),
make_coord(m_coord, n_coord, l_coord));
// Represent the full output tensor, slice to get the tile this CTA is responsible for
Tensor mD_mn = params.tma_store_d.get_tma_tensor(make_shape(M,N,L)); // (M,N,L)
Tensor mD = coalesce(mD_mn, take<0,2>(CtaTileMNK{}));
Tensor gD = local_tile(mD, take<0,2>(CtaTileMNK{}), coord_shape); // (CTA_M,CTA_N)
// Apply epilogue subtiling
Tensor gD_epi = flat_divide(gD, EpilogueTile{}); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N)
// Construct the corresponding pipelined smem tensors
SmemElementC* ptr_sC = nullptr;
if constexpr (is_source_supported) {
if constexpr (ReuseSmemC) {
ptr_sC = reinterpret_cast<SmemElementC*>(shared_tensors.smem_D().data());
} else {
ptr_sC = shared_tensors.smem_C().data();
}
}
SmemElementD* ptr_sD = nullptr;
if constexpr (is_destination_supported) {
ptr_sD = shared_tensors.smem_D().data();
}
Tensor sC_epi = cute::as_position_independent_swizzle_tensor(
make_tensor(make_smem_ptr(ptr_sC), SmemLayoutC{})); // (EPI_TILE_M,EPI_TILE_N,PIPE_C)
Tensor sD_epi = cute::as_position_independent_swizzle_tensor(
make_tensor(make_smem_ptr(ptr_sD), SmemLayoutD{})); // (EPI_TILE_M,EPI_TILE_N,PIPE_D)
// Get the smallest tiled copy we can use to retile the accumulators
using CopyAtomC = Copy_Atom<SM90_U32x4_STSM_N, cutlass::half_t>;
TiledCopy tiled_copy_C_atom = make_tiled_copy_C_atom(CopyAtomC{}, tiled_mma);
// (t)hread-partition for (r)egister to (s)mem copy (tRS_)
TiledCopy tiled_r2s = make_tiled_copy_S(Copy_Atom<CopyOpR2S,SmemElementD>{}, tiled_copy_C_atom);
ThrCopy thread_r2s = tiled_r2s.get_slice(thread_idx);
Tensor tRS_rAcc = thread_r2s.retile_S(accumulators); // ((R2S,R2S_V),MMA_M,MMA_N)
Tensor tRS_sD = thread_r2s.partition_D(sD_epi); // (R2S,R2S_M,R2S_N,PIPE_D)
// Allocate D registers
Layout tRS_rD_layout = make_layout(take<0,3>(shape(thread_r2s.partition_S(sD_epi))));
Tensor tRS_rD = make_tensor<SmemElementD>(tRS_rD_layout); // (R2S,R2S_M,R2S_N)
// Vectorized fragment view
constexpr int FragmentSize = DispatchPolicy::FragmentSize;
Tensor tRS_rAcc_frg = recast<Array<ElementAccumulator, FragmentSize>>(tRS_rAcc);
Tensor tRS_rD_frg = recast<Array<SmemElementD , FragmentSize>>(tRS_rD);
CUTE_STATIC_ASSERT(size<0>(tRS_rAcc) % FragmentSize == 0, "Fragment size does not vectorize properly");
// (t)hread-partition for (s)mem to (r)egister copy (tSR_)
TiledCopy tiled_s2r = make_tiled_copy_S(Copy_Atom<CopyOpS2R, SmemElementC>{}, tiled_copy_C_atom);
ThrCopy thread_s2r = tiled_s2r.get_slice(thread_idx);
Tensor tSR_sC = thread_s2r.partition_S(sC_epi); // (S2R,S2R_M,S2R_N,PIPE_C)
Layout tSR_rC_layout = thread_s2r.retile_D(tRS_rD).layout(); // (S2R,S2R_M,S2R_N)
// Allocate C registers
// If C smem load is a non-vectorized dst(i) = src(i) then we can allocate C registers directly in the compute type
// to eliminate some redundant pack+unpack instruction sequences for sub-word types
constexpr bool IsDirectS2R = cute::is_same_v<CopyOpS2R, AutoVectorizingCopyWithAssumedAlignment<128>>
&& decltype(max_common_vector(tSR_rC_layout, tSR_sC.layout()))::value <= 1;
using RegisterElementC = cute::conditional_t<IsDirectS2R, ElementCompute, SmemElementC>;
Tensor tRS_rC = make_tensor<RegisterElementC>(tRS_rD_layout); // (R2S,R2S_M,R2S_N)
Tensor tSR_rC = thread_s2r.retile_D(tRS_rC); // (S2R,S2R_M,S2R_N)
// thread(b)lock-partition for (s)mem to (g)mem copy (bSG_)
ThrCopy thrblk_s2g = params.tma_store_d.get_slice(Int<0>{});
Tensor bSG_sD = thrblk_s2g.partition_S(sD_epi); // (S2G,S2G_M,S2G_N,PIPE_D)
Tensor bSG_gD = thrblk_s2g.partition_D(gD_epi); // (S2G,S2G_M,S2G_N,EPI_M,EPI_N)
// OOB predication for tile quantization "residue"
Tensor mD_crd = make_identity_tensor(make_shape(M,N));
Tensor cD = local_tile(mD_crd, take<0,2>(CtaTileMNK{}), make_coord(m_coord, n_coord));
Tensor tRS_cD = thread_r2s.partition_S(flat_divide(cD, EpilogueTile{}));
auto residue_mn = make_coord(M,N);
CUTE_STATIC_ASSERT(mma_tile_m == epi_tile_m, "EPI_TILE_M must equal MMA_TILE_M");
CUTE_STATIC_ASSERT(mma_tile_n % epi_tile_n == 0, "EPI_TILE_N must divide MMA_TILE_N");
// Get the fusion callbacks for the consumer store warps
constexpr bool RefSrc = true; // Register tensors reference R2S copy src layout
auto cst_args = cutlass::epilogue::fusion::detail::ConsumerStoreArgs{
problem_shape_mnkl,
CtaTileMNK{},
tile_coord_mnkl,
residue_mn,
EpilogueTile{},
tiled_copy_C_atom,
thread_idx,
cD,
tRS_cD,
tRS_rC
};
auto cst_callbacks = fusion_callbacks.get_consumer_store_callbacks<RefSrc>(cst_args);
bool is_producer_load_needed = fusion_callbacks.is_producer_load_needed();
bool is_C_load_needed = is_source_supported && fusion_callbacks.is_C_load_needed();
// Thread synchronizer for previously issued waits or fences
// to ensure visibility of smem reads/writes to threads or TMA unit
auto synchronize = [&] () { cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::EpilogueBarrier); };
// Predication for TMA store (one warp issues TMA store)
bool issue_tma_store = (thread_idx / NumThreadsPerWarp) == 0;
// In the reuse smem configuration we have StagesC smem buffers and at most StagesD committed TMA stores in flight.
    // The TMA store pipeline producer acquire returns when at most StagesD-1 committed stores are in-flight, so we can
    // only guarantee store completion after StagesD iterations; only then can we begin issuing releases on the smem buffer locks.
// store_pipe_producer_state tracks the acquire and load_pipe_consumer_state tracks the release, in circular buffer fashion.
LoadPipelineState load_wait_state = load_pipe_consumer_state;
if constexpr (ReuseSmemC) {
load_wait_state = store_pipe_producer_state;
load_wait_state.phase_ ^= 1;
}
// We can delay issue of TMA store by one iteration to achieve better interleaving of non-TMA instructions
// Sync requirements of smem reuse may preclude this optimization
    // Delayed stores cause delayed stage releases, which cause a deadlock when StagesC == StagesD
int epi_m_prev = 0, epi_n_prev = 0;
static_assert(not (DelayTmaStore and ReuseSmemC and StagesC == StagesD), "This TMA epilogue configuration will deadlock");
// The TMA store sequence for one subtile iteration
auto tma_store_fn = [&] (int epi_m, int epi_n) {
// Write the tile from smem to gmem with TMA
cutlass::arch::fence_view_async_shared(); // ensure smem writes are visible to TMA
synchronize(); // ensure all threads have issued their async fence
if constexpr (is_destination_supported) {
if (issue_tma_store) {
copy(params.tma_store_d, bSG_sD(_,_,_,store_pipe_producer_state.index()), bSG_gD(_,_,_,epi_m,epi_n));
}
}
// Post async fence, pre TMA commit callback entry point
cst_callbacks.tma_store(epi_m, epi_n, store_pipe_producer_state.count(), issue_tma_store);
// Commit the TMA stores for this stage
if (issue_tma_store) {
store_pipeline.producer_commit(store_pipe_producer_state);
}
++store_pipe_producer_state;
++issued_stores;
// Wait for the next smem buffer to be available
if (issue_tma_store) {
store_pipeline.producer_acquire(store_pipe_producer_state);
}
synchronize();
if constexpr (ReuseSmemC) {
// producer_acquire returns when at most StagesD-1 committed stores are pending
bool store_finished = issued_stores > StorePipeline::UnacquiredStages;
// Let dma warp know earliest smem buffer is consumed and empty after StagesD producer commits
if (store_finished) {
if (is_producer_load_needed) {
load_pipeline.consumer_release(load_pipe_consumer_state);
}
++load_pipe_consumer_state;
}
}
};
//
// BEGIN EPILOGUE
//
// Pre-loop fusion callback entry point
cst_callbacks.begin();
// For each output tile
CUTLASS_PRAGMA_UNROLL
for (int epi_n = 0; epi_n < size<3>(gD_epi); ++epi_n) {
CUTLASS_PRAGMA_UNROLL
for (int epi_m = 0; epi_m < size<2>(gD_epi); ++epi_m) {
bool is_first_iteration = epi_m == 0 && epi_n == 0;
bool is_last_iteration = epi_m == size<2>(gD_epi)-1 && epi_n == size<3>(gD_epi)-1;
if (subtile_idx != -1 && (epi_n * static_cast<int>(size<2>(gD_epi)) + epi_m) != subtile_idx) {
continue;
}
// The current tile in accumulator
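        // Since EPI_TILE_M == MMA_TILE_M (asserted above), epi_m indexes the accumulator's
        // MMA_M mode directly, while each MMA tile in N spans (MMA_TILE_N / EPI_TILE_N)
        // epilogue subtiles.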
int mma_m = epi_m;
int mma_n = (epi_n * size<1>(EpilogueTile{})) / mma_tile_n;
Tensor tRS_rAcc_frg_mn = tRS_rAcc_frg(_,mma_m,mma_n);
if (is_producer_load_needed) {
// Wait for the producer load to fill smem
load_pipeline.consumer_wait(load_wait_state);
if (is_C_load_needed) {
// Copy source tile from smem to register
copy(tiled_s2r, tSR_sC(_,_,_,load_wait_state.index()), tSR_rC);
}
}
// First loop fusion callback entry point
cst_callbacks.previsit(epi_m, epi_n, load_wait_state.count(), is_producer_load_needed);
if (is_producer_load_needed) {
if constexpr (not ReuseSmemC) {
// Let producer load warp know smem buffers are consumed and empty
cutlass::arch::fence_view_async_shared();
load_pipeline.consumer_release(load_pipe_consumer_state);
++load_pipe_consumer_state;
}
++load_wait_state;
}
// Vectorized fragment loop with visitor callback entry point
int epi_n_in_mma = epi_n % (mma_tile_n / epi_tile_n);
int r2s_v = epi_n_in_mma * size(tRS_rD_frg);
CUTLASS_PRAGMA_UNROLL
for (int epi_v = 0; epi_v < size(tRS_rD_frg); ++epi_v) {
tRS_rD_frg(epi_v) = cst_callbacks.visit(tRS_rAcc_frg_mn(r2s_v + epi_v), epi_v, epi_m, epi_n);
}
// The latest we can delay the TMA store is right before the smem store of the next iteration
// since the current TMA store needs to be committed before we can acquire the next smem buffer
if constexpr (DelayTmaStore) {
// Issue TMA stores for the previous subtile
if (not is_first_iteration and subtile_idx == -1) {
tma_store_fn(epi_m_prev, epi_n_prev);
}
epi_m_prev = epi_m;
epi_n_prev = epi_n;
}
// Smem reduction callback entry point using current store buffer for workspace
cst_callbacks.reduce(sD_epi(_,_,store_pipe_producer_state.index()),
synchronize, epi_m, epi_n, is_last_iteration);
// Copy tile from register to smem
if constexpr (is_destination_supported) {
copy(tiled_r2s, tRS_rD, tRS_sD(_,_,_,store_pipe_producer_state.index()));
}
// Post reduction, pre TMA store callback entry point
constexpr bool issue_smem_store = true; // No smem store predication
cst_callbacks.postreduce(epi_m, epi_n, store_pipe_producer_state.count(), issue_smem_store);
if constexpr (not DelayTmaStore) {
// Issue TMA stores for this subtile
tma_store_fn(epi_m, epi_n);
}
} // for epi_m
} // for epi_n
if constexpr (DelayTmaStore) {
// Issue TMA stores for the last subtile
tma_store_fn(epi_m_prev, epi_n_prev);
}
// Post-loop fusion callback entry point
cst_callbacks.end();
return cute::make_tuple(load_pipe_consumer_state, store_pipe_producer_state);
}
CUTLASS_DEVICE auto
store_tail(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_consumer_state,
StorePipeline store_pipeline,
StorePipelineState store_pipe_producer_state) {
// wait for all TMA stores to complete
store_pipeline.producer_tail(store_pipe_producer_state);
// reset store counter
issued_stores = 0;
if constexpr (ReuseSmemC) {
if (fusion_callbacks.is_producer_load_needed()) {
// Issue releases on up to StagesD-1 previously issued TMA stores
constexpr int release_stages = cute::min(StorePipeline::UnacquiredStages, get_load_pipe_increment(CtaTileMNK{}));
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < release_stages; ++stage) {
load_pipeline.consumer_release(load_pipe_consumer_state);
++load_pipe_consumer_state;
}
}
}
return cute::make_tuple(load_pipe_consumer_state, store_pipe_producer_state);
}
private:
Params const& params;
FusionCallbacks fusion_callbacks;
int issued_stores = 0;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace collective
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized.hpp/0
|
{
"file_path": "cutlass/include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized.hpp",
"repo_id": "cutlass",
"token_count": 15129
}
| 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear scaling operations used by epilogues. Values are clamped before
converting to the output element type.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Single source of truth for whether to unroll for `LinearCombinationClamp()`
constexpr bool LinearCombinationClampIsHeavy() {
return false;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements then clamps the output before
/// converting to the output element type.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we use 64 or 32 sometimes when there are not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationClamp {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
using FragmentSource = Array<ElementOutput, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = detail::LinearCombinationClampIsHeavy();
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationClamp(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source,
ElementCompute uniform = ElementCompute(0)) const {
    // Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
/// Clamping constant value
ElementCompute const kClampMax =
ElementCompute(platform::numeric_limits<ElementOutput>::max());
ElementCompute const kClampMin =
ElementCompute(platform::numeric_limits<ElementOutput>::lowest());
intermediate = max_accumulator(intermediate, kClampMin);
intermediate = min_accumulator(intermediate, kClampMax);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
/// Clamping constant value
ElementCompute const kClampMax =
ElementCompute(platform::numeric_limits<ElementOutput>::max());
ElementCompute const kClampMin =
ElementCompute(platform::numeric_limits<ElementOutput>::lowest());
intermediate = max_accumulator(intermediate, kClampMin);
intermediate = min_accumulator(intermediate, kClampMax);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
};
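//
// Example usage (a minimal sketch; the int8_t / int32_t / float type choices
// below are illustrative assumptions):
//
//   using Op = cutlass::epilogue::thread::LinearCombinationClamp<
//       int8_t, /*Count=*/8, /*ElementAccumulator=*/int32_t, /*ElementCompute=*/float>;
//
//   Op::Params params(/*alpha=*/1.0f, /*beta=*/0.5f);
//   Op op(params);
//
//   Op::FragmentAccumulator accum;   // Array<int32_t, 8> produced by the MMA
//   Op::FragmentOutput      source;  // Array<int8_t, 8> loaded from tensor C
//   accum.clear();
//   source.clear();
//
//   // D = clamp(alpha * accum + beta * source), saturated to the int8_t range
//   Op::FragmentOutput d = op(accum, source);
//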
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conditional guards to enable partial specialization for packed integers
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && ((__CUDACC_VER_MAJOR__ > 10) || ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Applies a linear combination operator to an array of elements then clamps the output before
/// converting to the output element type.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
ScaleType::Kind Scale, ///< Control Alpha and Beta scaling
FloatRoundStyle Round
>
class LinearCombinationClamp<ElementOutput_, Count, int, float, Scale, Round> {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = int;
using ElementCompute = float;
static_assert(
platform::numeric_limits<ElementOutput>::is_integer,
"This elementwise op expects the output to be int.");
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = detail::LinearCombinationClampIsHeavy();
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationClamp(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source,
ElementCompute uniform = ElementCompute(0)) const {
    // Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
// Float min-max
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
//
// Convert float => ElementOutput_ with clamping
//
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_accumulator;
// Float min-max
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
//
// Convert float => ElementOutput_ with clamping
//
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
};
#endif // Conditional guards to enable partial specialization for packed integers
////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements then clamps
/// the output before converting to the output element type.
///
/// D = alpha * accumulator + beta * source + uniform
///
/// Note: The method below is valid only when problem_size_K <= 256 for signed int8 gemm
/// or problem_size_K <= 128 for unsigned int8 gemm. Otherwise, the default approach
/// above should be used.
/// TODO: Add logic to fall back to the default approach
template <
    /// Data type used to load and store tensors
typename ElementOutput_,
/// Number of elements computed per operation
int Count,
///< Control Alpha and Beta scaling
ScaleType::Kind Scale = ScaleType::Default,
/// Rounding mode
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest>
class FastLinearCombinationClamp {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = int;
using ElementCompute = float;
static_assert(
platform::numeric_limits<ElementOutput>::is_integer,
"This elementwise op expects the output to be int.");
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = false;
/// Host-constructable parameters structure
struct Params {
/// scales accumulators
ElementCompute alpha;
/// scales source tensor
ElementCompute beta;
/// pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *alpha_ptr;
/// pointer to source scalar - if not null, loads it from memory
ElementCompute const *beta_ptr;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params()
: alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute alpha, ElementCompute beta)
: alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute alpha)
: alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr)
: alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute const *alpha_ptr)
: alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host
/// memory
CUTLASS_HOST_DEVICE
FastLinearCombinationClamp(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(FragmentAccumulator const &accumulator,
FragmentOutput const &source,
ElementCompute uniform = ElementCompute(0)) const {
    // Convert source to internal compute numeric type
FastNumericArrayConverter<ElementCompute, ElementOutput, kCount, Round>
source_converter;
FastNumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
accumulator_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
// Float min-max
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate =
mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator,
intermediate); // D = alpha * Accum + X
}
/// Clamping constant value
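    /// (for example, with int8_t output, kClamp == 128, so values are clamped to [-128, 127])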
ElementCompute const kClamp =
ElementCompute(1 << (sizeof_bits<ElementOutput>::value - 1));
intermediate = max_accumulator(intermediate, -kClamp);
intermediate = min_accumulator(intermediate, kClamp - ElementCompute(1));
// Convert to destination numeric type
FastNumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
FastNumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
accumulator_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
// Float min-max
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator);
}
/// Clamping constant value
ElementCompute const kClamp =
ElementCompute(1 << (sizeof_bits<ElementOutput>::value - 1));
intermediate = max_accumulator(intermediate, -kClamp);
intermediate = min_accumulator(intermediate, kClamp - ElementCompute(1));
// Convert to destination numeric type
FastNumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
|
cutlass/include/cutlass/epilogue/thread/linear_combination_clamp.h/0
|
{
"file_path": "cutlass/include/cutlass/epilogue/thread/linear_combination_clamp.h",
"repo_id": "cutlass",
"token_count": 7901
}
| 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Functor performing linear combination with an elementwise operation
*/
#pragma once
#include "cutlass/half.h"
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/constants.h"
#include "cutlass/fast_math.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementCompute_, ///< Data type returned by this functor
typename ElementAccumulator_, ///< Data type of accumulators
typename ElementSource_, ///< Data type of source tensor
typename ElementTensor_, ///< Data type of additional tensor
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we use 64 or 32 sometimes when there are not enough data to store
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationWithElementwise {
public:
using ElementOutput = ElementSource_;
using ElementCompute = ElementCompute_;
using ElementAccumulator = ElementAccumulator_;
using ElementSource = ElementSource_;
using ElementTensor = ElementTensor_;
static bool const kIsHeavy = true;
static int const kCount = Count;
using FragmentCompute = Array<ElementCompute, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentSource = Array<ElementSource, kCount>;
using FragmentTensor = Array<ElementTensor, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute threshold; ///< minimum value that is output
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
threshold(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta,
ElementCompute threshold = ElementCompute(0)
): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr,
ElementCompute threshold = ElementCompute(0)
): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementCompute threshold_;
bool participates_in_reduction_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationWithElementwise(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
participates_in_reduction_ = true;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_ != ElementCompute(0);
}
/// Returns true if the threadblock computes the reduction
CUTLASS_HOST_DEVICE
bool participates_in_reduction() const {
return participates_in_reduction_;
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// set to NaN to make ReLU no-op for all except last k partitions
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementCompute const &>(allones);
// Avoid computing the reduction if this isn't the final Split-K slice
participates_in_reduction_ = false;
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentCompute operator()(
FragmentAccumulator const &accumulator,
FragmentSource const &source,
FragmentTensor const &tensor) const {
    // Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
return intermediate;
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentCompute operator()(
FragmentAccumulator const &accumulator,
FragmentTensor const &tensor) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
return intermediate;
}
};
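//
// Example usage (a minimal sketch; the float / half_t type choices below are
// illustrative assumptions):
//
//   using Op = cutlass::epilogue::thread::LinearCombinationWithElementwise<
//       float,            // ElementCompute returned by the functor
//       float,            // ElementAccumulator
//       cutlass::half_t,  // ElementSource
//       cutlass::half_t,  // ElementTensor
//       /*Count=*/8>;
//
//   Op::Params params(/*alpha=*/1.0f, /*beta=*/1.0f);
//   Op op(params);
//
//   Op::FragmentAccumulator accum;   // accumulators from the MMA
//   Op::FragmentSource      source;  // fragment loaded from tensor C
//   Op::FragmentTensor      tensor;  // fragment of the additional tensor operand
//
//   // D = alpha * accum + beta * source, returned in ElementCompute
//   Op::FragmentCompute d = op(accum, source, tensor);
//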
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/epilogue/thread/linear_combination_with_elementwise.h/0
|
{
"file_path": "cutlass/include/cutlass/epilogue/thread/linear_combination_with_elementwise.h",
"repo_id": "cutlass",
"token_count": 2835
}
| 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator with reduction over each column
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename TensorTileIterator_, ///< Additional tile iterator for tensor-valued operands
typename ElementVector_, ///< Pointer to reduction vector
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator
typename ReductionOp_, ///< Reduction operator
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(!IsEpilogueFunctorHeavy<OutputOp_>::value)
>
class EpilogueWithReduction :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using TensorTileIterator = TensorTileIterator_;
using ElementVector = ElementVector_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using ReductionOp = ReductionOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
static bool const kIsSingleSource = true;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Compute data type produced by the output op
using ElementCompute = typename OutputOp::ElementCompute;
/// Compute fragment
using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;
/// Thread map used by output tile iterators
using ThreadMap = typename OutputTileIterator::ThreadMap;
/// Fragment object used in reduction
using ReductionFragment = Array<
ElementAccumulator,
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Data type of additional tensor
using ElementTensor = typename TensorTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;
/// Tensor access type
using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
/// Shared memory allocation from epilogue base class
using BaseSharedStorage = typename Base::SharedStorage;
/// Used for the reduction
struct ReductionDetail {
/// If true, accumulator coordinates are computed and out-of-bounds checks are enabled when
/// performing the reduction.
static bool const kOobCheck = false;
/// Number of threads per warp
static int const kWarpSize = 32;
/// Number of distinct scalar column indices handled by each thread
static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;
/// Number of distinct scalar row indices handled by each thread
static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;
/// Number of threads per threadblock
static int const kThreadCount = kWarpSize * WarpCount::kCount;
/// Number of distinct threads per row of output tile
static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);
/// Number of distinct threads which must be reduced during the final reduction phase within the threadblock.
static int const kThreadRows = kThreadCount / kThreadsPerRow;
    /// Number of columns of the compacted SMEM tile that each thread reduces during the final reduction phase
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
kThreadRows,
Shape::kN
>;
/// Debug printing
CUTLASS_DEVICE
static void print() {
#if 0
printf("ReductionDetail {\n");
printf(
" kElementsPerAccess:%d\nkColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
"kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
kElementsPerAccess,
kColumnsPerThread,
kRowsPerThread,
kThreadCount,
kThreadsPerRow,
kThreadRows,
kThreadAccessesPerRow,
StorageShape::kRow,
StorageShape::kColumn,
StorageShape::kCount
);
printf("};\n");
#endif
}
};
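  // Example (hypothetical values): with Shape::kN == 128, kColumnsPerThread == 4,
  // and kThreadCount == 256, ReductionDetail yields kThreadsPerRow == 32,
  // kThreadRows == 8, and kThreadAccessesPerRow == 1.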
/// Shared storage structure (shadows base) with additional SMEM buffer for reduction
struct SharedStorage {
union {
BaseSharedStorage base;
AlignedArray<ElementAccumulator, ReductionDetail::StorageShape::kCount, 16> reduction; ///< Shared storage for reduction
};
CUTLASS_HOST_DEVICE
SharedStorage() { }
};
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
  /// Shared memory pointer for reduction
ElementAccumulator *reduction_ptr_;
/// Thread index within the threadblock
int thread_idx_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithReduction(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.base.reference(), thread_idx),
reduction_ptr_(shared_storage.reduction.data()),
thread_idx_(thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
ElementVector * reduction_output_ptr, ///< Reduction output vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord()) {
ReductionFragment reduction_fragment;
reduction_fragment.clear();
if (!output_op.is_source_needed()) {
compute_source_not_needed_(
output_op,
reduction_fragment,
destination_iterator,
accumulators,
tensor_iterator,
problem_size,
threadblock_offset);
}
else {
compute_source_needed_(
output_op,
reduction_fragment,
destination_iterator,
accumulators,
source_iterator,
tensor_iterator,
problem_size,
threadblock_offset);
}
if (output_op.participates_in_reduction()) {
reduction_(problem_size, threadblock_offset, reduction_output_ptr, reduction_fragment);
}
}
private:
/// Perform the reduction
CUTLASS_DEVICE
void reduction_(
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
    MatrixCoord const &threadblock_offset,      ///< Threadblock's initial offset within the problem size space
ElementVector * reduction_output_ptr, ///< Reduction output vector
ReductionFragment const & reduction_fragment) {
//
// Store the partially reduced value to SMEM
//
// Guard against uses of the existing SMEM tile
__syncthreads();
using AccessType = AlignedArray<ElementAccumulator, ThreadMap::kElementsPerAccess>;
//
// Determine a compacted thread arrangement to store to SMEM.
//
int const kThreadsPerRow = Shape::kN / (ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess);
MatrixCoord thread_offset(
thread_idx_ / kThreadsPerRow,
(thread_idx_ % kThreadsPerRow) * ThreadMap::kElementsPerAccess);
//
    // Each thread stores its fragment to SMEM
//
AccessType *aligned_reduction_ptr = reinterpret_cast<AccessType *>(
&reduction_ptr_[thread_offset.row() * Shape::kN + thread_offset.column()]);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&reduction_fragment);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int col_idx = column * ThreadMap::Delta::kColumn / ThreadMap::kElementsPerAccess;
aligned_reduction_ptr[col_idx] = frag_ptr[column];
}
__syncthreads();
//
// Now, threads are assigned several columns of the output. They fetch over all rows from
// the compacted SMEM tile and perform a reduction.
//
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < ReductionDetail::kThreadAccessesPerRow; ++j) {
int column_idx = thread_idx_ + j * ReductionDetail::kThreadCount;
ReductionOp reduction_op;
ElementAccumulator reduction_element = ElementAccumulator();
int output_column_idx = threadblock_offset.column() + column_idx;
if (column_idx < Shape::kN && output_column_idx < problem_size.column()) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ReductionDetail::kThreadRows; ++row) {
if (row) {
auto frag = reduction_ptr_[row * Shape::kN + column_idx];
reduction_element = reduction_op(reduction_element, frag);
}
else {
reduction_element = reduction_ptr_[column_idx];
}
}
// Store
reduction_output_ptr[column_idx] = ElementVector(reduction_element);
}
}
}
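  /// Helper for staging accumulators into shared memory: push(pos, ...) advances a copy
  /// of the accumulator fragment iterator by `pos` positions (dispatched over the
  /// compile-time index sequence) and stores the selected fragment through the warp
  /// tile iterator.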
template<class Seq>
struct acc2smem;
template <size_t... Seq>
struct acc2smem<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
ReductionFragment &reduction_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    TensorTileIterator tensor_iterator,         ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
//
// Iterator over warp-level accumulator fragment
//
typename TensorTileIterator::Fragment tensor_fragment;
tensor_fragment.clear();
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Convert and store fragment
//
tensor_iterator.load(tensor_fragment);
++tensor_iterator;
__syncthreads();
acc2smem<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
//
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
//
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Compute the output result
//
FragmentCompute compute_fragment;
apply_output_operator_source_not_needed_(
reduction_fragment,
compute_fragment,
output_op,
aligned_accum_fragment[0],
tensor_fragment,
destination_iterator);
//
// Store the final result
//
NumericArrayConverter<ElementOutput, ElementCompute, FragmentCompute::kElements> converter;
typename OutputTileIterator::Fragment output_fragment = converter(compute_fragment);
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
ReductionFragment &reduction_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    OutputTileIterator source_iterator,         ///< Tile iterator for source accumulator matrix
    TensorTileIterator tensor_iterator,         ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
typename TensorTileIterator::Fragment tensor_fragment;
tensor_fragment.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_fragment.clear();
source_iterator.load(source_fragment);
++source_iterator;
tensor_iterator.load(tensor_fragment);
++tensor_iterator;
//
// Convert and store fragment
//
__syncthreads();
acc2smem<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Compute the output result
//
FragmentCompute compute_fragment;
apply_output_operator_(
reduction_fragment,
compute_fragment,
output_op,
aligned_accum_fragment[0],
source_fragment,
tensor_fragment,
destination_iterator);
//
// Convert and store the final result
//
NumericArrayConverter<ElementOutput, ElementCompute, FragmentCompute::kElements> converter;
typename OutputTileIterator::Fragment output_fragment = converter(compute_fragment);
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
ReductionFragment &reduction_fragment,
FragmentCompute &compute_fragment,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment,
typename OutputTileIterator::Fragment const &source_fragment,
typename TensorTileIterator::Fragment const &tensor_fragment,
OutputTileIterator const & destination_iterator) {
ComputeAccessType *compute_frag_ptr =
reinterpret_cast<ComputeAccessType *>(&compute_fragment);
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
OutputAccessType const *source_frag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment);
TensorAccessType const *tensor_frag_ptr =
reinterpret_cast<TensorAccessType const *>(&tensor_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
compute_frag_ptr[i] = output_op(accum_frag_ptr[i], source_frag_ptr[i], tensor_frag_ptr[i]);
}
//
// Partial reduction over each column
//
ReductionOp reduction_op;
typename OutputTileIterator::Mask mask;
destination_iterator.get_mask(mask);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ReductionDetail::kColumnsPerThread; ++column) {
int column_vector_idx = column / ThreadMap::kElementsPerAccess;
bool column_guard = mask.predicates[column_vector_idx];
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ReductionDetail::kRowsPerThread; ++row) {
bool fetch;
if (ReductionDetail::kOobCheck) {
int row_idx = (row % ThreadMap::Iterations::kRow);
int residual = (row / ThreadMap::Iterations::kRow);
int group_idx = (residual % ThreadMap::Iterations::kGroup);
residual = (residual / ThreadMap::Iterations::kGroup);
int cluster_idx = (residual % ThreadMap::Iterations::kCluster);
int row_offset = row_idx * ThreadMap::Delta::kRow
+ group_idx * ThreadMap::Delta::kGroup
+ cluster_idx * ThreadMap::Delta::kCluster;
int output_row = destination_iterator.thread_start_row() + row_offset;
fetch = (output_row < destination_iterator.extent_row() && column_guard);
}
else {
fetch = true;
}
ElementCompute value = ElementCompute();
if (fetch) {
value = compute_fragment[row * ReductionDetail::kColumnsPerThread + column];
}
reduction_fragment[column] = reduction_op(
reduction_fragment[column],
value);
}
}
}
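// Worked example of the out-of-bounds row decomposition above (hypothetical thread map with
// Iterations::kRow = 2, Iterations::kGroup = 2, Iterations::kCluster = 1 and
// Delta::kRow = 8, Delta::kGroup = 32, Delta::kCluster = 0): for flattened index row = 3,
//   row_idx = 3 % 2 = 1, residual = 3 / 2 = 1,
//   group_idx = 1 % 2 = 1, cluster_idx = (1 / 2) % 1 = 0,
// so row_offset = 1 * 8 + 1 * 32 + 0 = 40, and the value is accumulated only if
// thread_start_row() + 40 lies within extent_row() and the column predicate holds.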
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
ReductionFragment &reduction_fragment,
FragmentCompute &compute_fragment,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment,
typename TensorTileIterator::Fragment const &tensor_fragment,
OutputTileIterator const & destination_iterator
) {
ComputeAccessType *compute_frag_ptr =
reinterpret_cast<ComputeAccessType *>(&compute_fragment);
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
TensorAccessType const *tensor_frag_ptr =
reinterpret_cast<TensorAccessType const *>(&tensor_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
compute_frag_ptr[i] = output_op(accum_frag_ptr[i], tensor_frag_ptr[i]);
}
//
// Partial reduction over each column
//
ReductionOp reduction_op;
typename OutputTileIterator::Mask mask;
destination_iterator.get_mask(mask);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ReductionDetail::kColumnsPerThread; ++column) {
int column_vector_idx = column / ThreadMap::kElementsPerAccess;
bool column_guard = mask.predicates[column_vector_idx];
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ReductionDetail::kRowsPerThread; ++row) {
bool fetch;
if (ReductionDetail::kOobCheck) {
int row_idx = (row % ThreadMap::Iterations::kRow);
int residual = (row / ThreadMap::Iterations::kRow);
int group_idx = (residual % ThreadMap::Iterations::kGroup);
residual = (residual / ThreadMap::Iterations::kGroup);
int cluster_idx = (residual % ThreadMap::Iterations::kCluster);
int row_offset = row_idx * ThreadMap::Delta::kRow
+ group_idx * ThreadMap::Delta::kGroup
+ cluster_idx * ThreadMap::Delta::kCluster;
int output_row = destination_iterator.thread_start_row() + row_offset;
fetch = (output_row < destination_iterator.extent_row() && column_guard);
}
else {
fetch = true;
}
ElementCompute value = ElementCompute();
if (fetch) {
value = compute_fragment[row * ReductionDetail::kColumnsPerThread + column];
}
reduction_fragment[column] = reduction_op(
reduction_fragment[column],
value);
}
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/epilogue/threadblock/epilogue_with_reduction.h/0
|
{
"file_path": "cutlass/include/cutlass/epilogue/threadblock/epilogue_with_reduction.h",
"repo_id": "cutlass",
"token_count": 10808
}
| 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/permute.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIteratorConv | ForwardTileIterator
///
template <
typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
bool ScatterD = false, ///< Scatter D operand or not
typename PermuteDLayout = layout::NoPermute, ///< Permute D operand or not
bool UseCUDAStore = false,
int Rank = 4
>
class PredicatedTileIteratorConv {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
static int const kRank = Rank;
using Layout = typename platform::conditional<kRank == 4,
layout::TensorNHWC,
layout::TensorNDHWC>::type;
using Stride = typename Layout::Stride;
static int const kStrideRank = Layout::kStrideRank;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using MappedLayout = layout::RowMajor;
using Index = typename MappedLayout::Index;
using LongIndex = typename MappedLayout::LongIndex;
using TensorCoord = typename MappedLayout::TensorCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static bool constexpr PermuteD = !layout::is_trivial_permute<PermuteDLayout>;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
using Base = PredicatedTileIteratorParams;
/// Fast divmod objects divided by tensor extents
FastDivmod divmod[kStrideRank - 1];
Stride tensor_stride;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout, conv::Conv2dProblemSize const &problem_size):
PredicatedTileIteratorParams(
layout.stride()[0] * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
) {
divmod[0] = FastDivmod(problem_size.Q);
divmod[1] = FastDivmod(problem_size.P);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStrideRank; ++i) {
tensor_stride[i] = layout.stride()[i];
}
}
CUTLASS_HOST_DEVICE
Params(Layout const &layout, conv::Conv3dProblemSize const &problem_size):
PredicatedTileIteratorParams(
layout.stride()[0] * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
) {
divmod[0] = FastDivmod(problem_size.Q);
divmod[1] = FastDivmod(problem_size.P);
divmod[2] = FastDivmod(problem_size.Z);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStrideRank; ++i) {
tensor_stride[i] = layout.stride()[i];
}
}
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
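// Illustrative sketch of Params construction (the names `layout_D` and `problem_size` are
// assumptions standing in for the epilogue's NHWC output layout and its
// conv::Conv2dProblemSize; they are not defined in this header):
//
//   Params params(layout_D, problem_size);
//   // After construction:
//   //   params.divmod[0] performs fast division by problem_size.Q
//   //   params.divmod[1] performs fast division by problem_size.P
//   //   params.tensor_stride holds layout_D.stride()[0 .. kStrideRank - 1]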
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
Params params_;
/// Byte-level pointer used for both load() and store() unless PermuteD is enabled, in which case byte_pointer_ is used only for load().
uint8_t *byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// Extent of the matrix tile in columns
Index extent_column_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// A thread's starting column
Index thread_start_column_;
/// Internal state counter
int state_[3];
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorConv(
Params const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord()
):
params_(params)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_row_ = extent.row();
extent_column_ = extent.column();
thread_start_row_ = thread_offset.row();
thread_start_column_ = thread_offset.column();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < extent.column());
}
// Null pointer performs no accesses
if (!pointer) {
mask_.clear();
}
// Initialize byte_pointer_
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) +
LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) const {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
Stride tensor_coord = CoordinateDecompositionLittleEndian<kStrideRank>(row_offset + thread_start_row_, params_.divmod);
LongIndex tensor_offset = dot(tensor_coord, params_.tensor_stride);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess + tensor_offset / kElementsPerAccess],
guard);
}
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) const {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
Stride tensor_coord = CoordinateDecompositionLittleEndian<kStrideRank>((row_offset + thread_start_row_), params_.divmod);
LongIndex tensor_offset = dot(tensor_coord, params_.tensor_stride);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
if (UseCUDAStore) {
if (guard) {
memory_pointer[tensor_offset / kElementsPerAccess] =
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column];
}
} else {
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)&memory_pointer[tensor_offset / kElementsPerAccess],
guard);
}
memory_pointer += (ThreadMap::Delta::kColumn / kElementsPerAccess);
}
}
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) const {
store_with_byte_offset(frag, 0);
}
CUTLASS_DEVICE
MatrixCoord thread_start() const {
return MatrixCoord(thread_start_row_, thread_start_column_);
}
/// Returns the thread's starting row within the output tile
CUTLASS_DEVICE
int32_t thread_start_row() const {
return thread_start_row_;
}
/// Returns the thread's starting column within the output tile
CUTLASS_DEVICE
int32_t thread_start_column() const {
return thread_start_column_;
}
/// Extent of the matrix in rows
CUTLASS_DEVICE
Index extent_row() const {
return extent_row_;
}
/// Extent of the matrix in columns
CUTLASS_DEVICE
Index extent_column() const {
return extent_column_;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorConv &operator++() {
++state_[0];
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow
* ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile;
}
}
}
return *this;
}
/// Advances a number of positions to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorConv &operator+=(int increment)
{
// Row
state_[0] += increment;
int increment_row = state_[0] / ThreadMap::Count::kRow;
state_[0] = state_[0] % ThreadMap::Count::kRow;
thread_start_row_ += (ThreadMap::Shape::kRow * increment);
// Group
state_[1] += increment_row;
int increment_group = state_[1] / ThreadMap::Count::kGroup;
state_[1] = state_[1] % ThreadMap::Count::kGroup;
thread_start_row_ +=
(ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow *
ThreadMap::Count::kRow *
increment_row;
// Cluster
state_[2] += increment_group;
int increment_cluster = state_[2] / ThreadMap::Count::kCluster;
state_[2] = state_[2] % ThreadMap::Count::kCluster;
thread_start_row_ +=
ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup *
ThreadMap::Count::kRow *
ThreadMap::Shape::kRow *
increment_group;
// Tile
thread_start_row_ +=
ThreadMap::Shape::kGroup *
ThreadMap::Shape::kRow *
ThreadMap::Shape::kCluster *
ThreadMap::Shape::kTile *
increment_cluster;
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
///< Sets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) const {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
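// Usage sketch (illustrative only; `params`, `ptr_D`, `extent`, `thread_idx` and
// `threadblock_offset` are assumed to be supplied by the enclosing epilogue):
//
//   using Iterator = PredicatedTileIteratorConv<ThreadMap, Element>;
//   Iterator iterator(params, ptr_D, extent, thread_idx, threadblock_offset);
//
//   typename Iterator::Fragment frag;
//   CUTLASS_PRAGMA_UNROLL
//   for (int iter = 0; iter < Iterator::kIterations; ++iter) {
//     // ... produce `frag` from the epilogue's compute stage ...
//     iterator.store(frag);  // predicated store through the NHWC/NDHWC coordinate decomposition
//     ++iterator;            // advance to the next group of output rows
//   }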
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_conv.h/0
|
{
"file_path": "cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_conv.h",
"repo_id": "cutlass",
"token_count": 6619
}
| 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/epilogue/warp/simt_policy.h"
#define CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES 1
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator, ///< matrix multiply operation (concept: arch::Mma)
typename Element, ///< data type of element to be written
typename Layout, ///< target shared memory layout
typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimt;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimt<WarpShape_, Operator_, Element_, layout::RowMajor, MmaSimtPolicy_> {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
typename Operator::ElementC,
Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
typename Operator::ElementC,
Policy::kAccumulatorElementCount>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Padding quantity
using Padding = MatrixShape<
0,
4 * Policy::kElementsPerAccess
#if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES
+ 1
#endif
>;
private:
#if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
1
>;
#else
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
Policy::kElementsPerAccess
>;
#endif
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimt(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimt(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
pointer_ += layout_({
lane_offset.row(),
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimt & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimt & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
(tile_offset.column() * Shape::kColumn / int(AccessType::kElements))
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimt & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
#if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES
// de-vectorized stores
using ScalarAccessType = AlignedArray<Element, 1>;
ScalarAccessType const *scalarFragPtr = reinterpret_cast<ScalarAccessType const *>(&frag);
ScalarAccessType *scalarPointer = reinterpret_cast<ScalarAccessType *>(pointer_) + pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::kElementsPerAccess; s++) {
scalarPointer[n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s] = scalarFragPtr[n * Policy::kElementsPerAccess + s];
}
}
#else
// original vector stores
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)] = frag_ptr[n];
}
#endif
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
frag_ptr[n] = pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
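// Usage sketch (illustrative; `smem_ref` is assumed to be a TensorRef into the epilogue's
// shared-memory staging tile and `lane_id` the lane index within the warp):
//
//   using WarpTileIterator =
//       TileIteratorSimt<WarpShape, Operator, Element, layout::RowMajor, MmaSimtPolicy>;
//   WarpTileIterator warp_tile_iterator(smem_ref, lane_id);
//
//   typename WarpTileIterator::Fragment frag;
//   // ... select this iteration's accumulator elements into `frag` ...
//   warp_tile_iterator.store(frag);              // scalar stores when
//                                                // CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES is set
//   warp_tile_iterator.add_tile_offset({1, 0});  // step one tile of rows (e.g. for partition-K)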
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename Layout_, ///< target shared memory layout
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimtDirectConv {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// Shape of the tile in memory
using Shape = MatrixShape<Policy::kRowsPerIteration, WarpShape::kN>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<typename Operator::ElementC, Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<typename Operator::ElementC, Policy::kAccumulatorElementCount>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Padding quantity
using Padding = MatrixShape<0,
0
>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
Policy::kElementsPerAccess
>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Base smem offset;
Index base_smem_address_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv() : pointer_(nullptr) {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
pointer_ += layout_({
lane_offset.row(),
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
(tile_offset.column() * Shape::kColumn / int(AccessType::kElements))
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
// original vector stores
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
AccessType * load_pointer_ = reinterpret_cast<AccessType *>(reinterpret_cast<uint8_t *>(pointer_) + base_smem_address_);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
load_pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)] = frag_ptr[n];
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
frag_ptr[n] = pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address){
base_smem_address_ = address;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename ThreadOutputShape_, /// Size of the per-thread output tile (concept: TensorNHWC)
typename ThreadBlockOutputShape_, /// Size of the threadblock-level output tile (concept: TensorNHWC)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename Layout_, ///< target shared memory layout
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimtDirect2dConv {
public:
using WarpShape = WarpShape_;
using ThreadOutputShape = ThreadOutputShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = layout::RowMajor;
using MmaSimtPolicy = MmaSimtPolicy_;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
// Thread-level shape of a fragment
using ThreadShape = MatrixShape<ThreadOutputShape::kNHW, ThreadOutputShape::kC>;
static_assert(!(ThreadShape::kColumn % MmaSimtPolicy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
using ThreadTileCount = MatrixShape<ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW>;
using Iterations =
MatrixShape<ThreadShape::kRow, ThreadShape::kColumn / MmaSimtPolicy::LaneMmaShape::kN>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = typename Operator::FragmentC;
/// This is the fragment size produced by one access of the iterator.
using Fragment = AccumulatorTile;
/// Padding quantity
using Padding = MatrixShape<0, 0>;
private:
// Storage type for accessing memory
using AccessType = AlignedArray<Element, MmaSimtPolicy::LaneMmaShape::kN>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Base smem offset;
Index base_smem_address_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirect2dConv() : pointer_(nullptr) {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtDirect2dConv(TensorRef const &ref, unsigned thread_id, unsigned lane_id)
: pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements) {
auto lane_layout = MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
// Get base HW offset of current threads
const int threadgroup = thread_id / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC);
const int base_p = (threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH;
const int base_q = (threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW;
const int row_offset = base_p * ThreadBlockOutputShape::kW + base_q;
pointer_ += layout_(
{row_offset,
lane_offset.column() * MmaSimtPolicy::LaneMmaShape::kN / int(AccessType::kElements)});
}
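// Worked example of the thread-to-output mapping above (hypothetical shapes, for intuition
// only): with ThreadBlockOutputShape = <H=8, W=8, C=64> and ThreadOutputShape = <H=2, W=2, C=8>,
// ThreadTileCount is <4, 4> and each threadgroup owns a 2x2 spatial patch. For thread_id = 17:
//   threadgroup = 17 / (64 / 8) = 2
//   base_p      = (2 / 4) * 2   = 0
//   base_q      = (2 % 4) * 2   = 4
// so this thread's stores begin at row_offset = 0 * 8 + 4 = 4 of the shared-memory tile.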
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimtDirect2dConv &add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType *storer_pointer_ =
reinterpret_cast<AccessType *>(reinterpret_cast<uint8_t *>(pointer_) + base_smem_address_);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int h = 0; h < ThreadOutputShape::kH; ++h) {
CUTLASS_PRAGMA_UNROLL
for (int w = 0; w < ThreadOutputShape::kW; ++w) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < Iterations::kColumn; ++col) {
int offset = (w + h * ThreadBlockOutputShape::kW) *
(ThreadBlockOutputShape::kC / AccessType::kElements) +
col;
storer_pointer_[offset + pointer_offset / int(AccessType::kElements)] =
frag_ptr[w + h * ThreadOutputShape::kW + col];
}
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) { base_smem_address_ = address; }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename Layout_, ///< target shared memory layout
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimtCanonical {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = Layout_;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
typename Operator::ElementC,
Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
typename Operator::ElementC,
Policy::kAccumulatorElementCount>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Padding quantity
using Padding = MatrixShape<
0,
4 * Policy::kElementsPerAccess + 1
>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
1
>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Guard to indicate whether the shape is divisible
bool divisible_;
/// Extent of the output tensor
MatrixCoord extent_;
/// Thread offset
MatrixCoord thread_offset_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements),
divisible_(true),
extent_(WarpShape::kM, WarpShape::kN) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
thread_offset_ = {
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess
};
pointer_ += layout_({
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical(
TensorRef const &ref,
TensorCoord const &extent,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements),
divisible_(false),
extent_(extent) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
thread_offset_ = {
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess
};
pointer_ += layout_({
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & add_tile_offset(TensorCoord const &tile_offset) {
MatrixCoord coord_offset(
tile_offset.row(),
tile_offset.column() * Shape::kColumn
);
thread_offset_ += coord_offset;
pointer_ += layout_({
coord_offset.row(),
coord_offset.column()
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
// de-vectorized stores
using ScalarAccessType = AlignedArray<Element, 1>;
ScalarAccessType const *scalarFragPtr = reinterpret_cast<ScalarAccessType const *>(&frag);
ScalarAccessType *scalarPointer = reinterpret_cast<ScalarAccessType *>(pointer_) + pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::kElementsPerAccess; s++) {
int ptr_idx = n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s;
int frag_idx = n * Policy::kElementsPerAccess + s;
int col = thread_offset_.column() + ptr_idx;
if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) {
scalarPointer[ptr_idx] = scalarFragPtr[frag_idx];
}
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
// de-vectorized loads
using ScalarAccessType = AlignedArray<Element, 1>;
ScalarAccessType *scalarFragPtr = reinterpret_cast<ScalarAccessType *>(&frag);
ScalarAccessType const *scalarPointer = reinterpret_cast<ScalarAccessType const*>(pointer_) + pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::kElementsPerAccess; s++) {
int ptr_idx = n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s;
int frag_idx = n * Policy::kElementsPerAccess + s;
int col = thread_offset_.column() + ptr_idx;
if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) {
scalarFragPtr[frag_idx] = scalarPointer[ptr_idx];
}
}
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & operator++() {
return add_tile_offset({1, 0});
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
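// Usage sketch (illustrative): the two constructors select between unguarded and
// extent-guarded accesses; `ref`, `lane_id`, `rows_remaining` and `cols_remaining` are
// assumed to be provided by the caller.
//
//   using CanonicalIterator =
//       TileIteratorSimtCanonical<WarpShape, Operator, Element, Layout, MmaSimtPolicy>;
//
//   // Warp tile evenly divides the output: every element is loaded/stored.
//   CanonicalIterator it_full(ref, lane_id);
//
//   // Partial tile: load()/store() touch only coordinates inside the extent.
//   CanonicalIterator it_partial(ref, MatrixCoord(rows_remaining, cols_remaining), lane_id);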
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/epilogue/warp/tile_iterator_simt.h/0
|
{
"file_path": "cutlass/include/cutlass/epilogue/warp/tile_iterator_simt.h",
"repo_id": "cutlass",
"token_count": 9058
}
| 34 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/tensor_predicate.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm70TwoStageUnpredicated,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm70TwoStageUnpredicated;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}))));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}))));
struct SharedStorage
{
cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a;
cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b;
};
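// Illustrative note: the mainloop's shared-memory requirement is sizeof(SharedStorage),
// i.e. cosize(SmemLayoutA) elements of A plus cosize(SmemLayoutB) elements of B (there is
// no pipeline dimension for this two-stage policy). A hypothetical way a launcher might
// query it:
//
//   static constexpr int kMainloopSmemBytes = sizeof(SharedStorage);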
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
};
// Device side kernel params
using Params = Arguments;
//
// Methods
//
CollectiveMma() = default;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) {
(void) workspace;
return args;
}
/// Perform a threadblock-scoped matrix multiply-accumulate
template <
class FrgTensorD,
class TensorA,
class TensorB,
class FrgTensorC,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
operator() (
FrgTensorD &accum,
TensorA gA,
TensorB gB,
FrgTensorC const &src_accum,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
char *smem_buf)
{
using namespace cute;
(void)residue_mnk;
static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident.");
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
static_assert(cute::rank(SmemLayoutB{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
// Construct shared memory tiles
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K)
Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K)
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_a;
GmemTiledCopyB gmem_tiled_copy_b;
auto copy_a_thr = gmem_tiled_copy_a.get_slice(thread_idx);
auto copy_b_thr = gmem_tiled_copy_b.get_slice(thread_idx);
Tensor tAgA = copy_a_thr.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = copy_a_thr.partition_D(sA); // (ACPY,ACPY_M,ACPY_K)
Tensor tBgB = copy_b_thr.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = copy_b_thr.partition_D(sB); // (BCPY,BCPY_N,BCPY_K)
// Allocate the register tiles for double buffering -- same shape as partitioned data
Tensor tArA = make_fragment_like(tAsA); // (ACPY,ACPY_M,ACPY_K)
Tensor tBrB = make_fragment_like(tBsB); // (BCPY,BCPY_N,BCPY_K)
// Tile MMA compute thread partitions and allocate accumulators
TiledMma tiled_mma;
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCrA = thr_mma.partition_fragment_A(sA); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.partition_fragment_B(sB); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
//
// Copy Atom retiling
//
auto thr_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsA = thr_copy_A.partition_S(sA);
Tensor tCrA_copy_view = thr_copy_A.retile_D(tCrA);
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // M
auto thr_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsB = thr_copy_B.partition_S(sB);
Tensor tCrB_copy_view = thr_copy_B.retile_D(tCrB);
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N
//
// Prologue
//
// Copy gmem to rmem for the first k_tile
copy(gmem_tiled_copy_a, tAgA(_,_,_,*k_tile_iter), tArA);
copy(gmem_tiled_copy_b, tBgB(_,_,_,*k_tile_iter), tBrB);
if (--k_tile_count > 0) ++k_tile_iter;
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
// Wait until the smem tiles are visible to all threads
__syncthreads();
// Load A, B smem->rmem for k=0
copy(tCsA(_,_,0), tCrA_copy_view(_,_,0));
copy(tCsB(_,_,0), tCrB_copy_view(_,_,0));
//
// Mainloop
//
// Size of the k-tile's outer product mode (k)
auto K_BLOCK_MAX = size<2>(tCrA);
CUTLASS_PRAGMA_NO_UNROLL
while (k_tile_count > -1)
{
// Pipeline the outer products with a static for loop
for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
__syncthreads();
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
__syncthreads();
}
// Load A, B smem->rmem for k+1
int k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(tCsA(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next));
copy(tCsB(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next));
if (k_block == 0)
{
// Copy gmem to rmem
copy(gmem_tiled_copy_a, tAgA(_,_,_,*k_tile_iter), tArA);
copy(gmem_tiled_copy_b, tBgB(_,_,_,*k_tile_iter), tBrB);
if (--k_tile_count > 0) ++k_tile_iter;
}
// transform before compute
cute::transform(tCrA(_,_,k_block), TransformA{});
cute::transform(tCrB(_,_,k_block), TransformB{});
// Thread-level register gemm for k
// disambiguate gemm (shared with the namespace name)
cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum);
});
}
}
};
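// Pipeline schedule of the two-stage mainloop above (summary of the code, not additional
// functionality):
//
//   prologue:                   gmem(k_tile 0) -> rmem -> smem; prefetch smem(k_block 0) -> rmem
//   steady state, per k_tile:
//     k_block == 0:             issue gmem(next k_tile) -> rmem copies
//     every k_block:            prefetch smem(k_block + 1) -> rmem, then MMA on k_block
//     k_block == K_BLOCK_MAX-1: __syncthreads(); commit rmem -> smem; __syncthreads()
//
// Because a single shared-memory buffer is reused, the two barriers around the commit are
// what separate reads of the previous k_tile from writes of the next one.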
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm70TwoStage,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm70TwoStage;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}))));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}))));
struct SharedStorage
{
cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a;
cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
};
// Device side kernel params
using Params = Arguments;
//
// Methods
//
CollectiveMma() = default;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) {
(void) workspace;
return args;
}
/// Perform a threadblock-scoped matrix multiply-accumulate
template <
class FrgTensorD,
class TensorA,
class TensorB,
class FrgTensorC,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
operator() (
FrgTensorD &accum,
TensorA gA,
TensorB gB,
FrgTensorC const &src_accum,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
char *smem_buf)
{
using namespace cute;
static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident.");
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
static_assert(cute::rank(SmemLayoutB{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
// Construct shared memory tiles
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
// Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k)
// This aligns the tensor with BLK_K for all but the 0th k_tile
gA.data() = &gA(0, get<2>(residue_mnk), 0);
gB.data() = &gB(0, get<2>(residue_mnk), 0);
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_a;
GmemTiledCopyB gmem_tiled_copy_b;
auto gmem_thr_copy_a = gmem_tiled_copy_a.get_slice(thread_idx);
auto gmem_thr_copy_b = gmem_tiled_copy_b.get_slice(thread_idx);
Tensor tAgA = gmem_thr_copy_a.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = gmem_thr_copy_a.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE)
Tensor tBgB = gmem_thr_copy_b.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = gmem_thr_copy_b.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE)
// Allocate the register tiles for double buffering -- same shape as partitioned data
Tensor tArA = make_fragment_like(tAsA); // (ACPY,ACPY_M,ACPY_K)
Tensor tBrB = make_fragment_like(tBsB); // (BCPY,BCPY_N,BCPY_K)
//
// PREDICATES
//
// Allocate predicate tensors for m and n
Tensor tApA = make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{});
Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{});
// Construct identity layout for sA and sB
Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k)
Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k)
// Repeat the partitioning with identity layouts
Tensor tAcA = gmem_thr_copy_a.partition_S(cA); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k)
Tensor tBcB = gmem_thr_copy_b.partition_S(cB); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k)
// Set predicates for m bounds
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < size<0>(tApA); ++m) {
tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk); // blk_m coord < residue_m
}
// Set predicates for n bounds
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < size<0>(tBpB); ++n) {
tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk); // blk_n coord < residue_n
}
//
// PREFETCH
//
// Clear the rmem tiles to account for predicated off loads
clear(tArA);
clear(tBrB);
// Start async loads for 0th k-tile, where we take care of the k residue
{
Tensor tAgAk = tAgA(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tArA); ++k) {
if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gA shifted)
copy_if(gmem_tiled_copy_a, tApA(_,k), tAgAk(_,_,k), tArA(_,_,k));
}
}
Tensor tBgBk = tBgB(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tBrB); ++k) {
if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gB shifted)
copy_if(gmem_tiled_copy_b, tBpB(_,k), tBgBk(_,_,k), tBrB(_,_,k));
}
}
++k_tile_iter;
--k_tile_count;
}
// Tile MMA compute thread partitions and allocate accumulators
TiledMma tiled_mma;
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCrA = thr_mma.make_fragment_A(thr_mma.partition_A(sA)); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.make_fragment_B(thr_mma.partition_B(sB));                           // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
//
// Copy Atom retiling
//
auto thr_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsA = thr_copy_A.partition_S(sA);
Tensor tCrA_copy_view = thr_copy_A.retile_D(tCrA);
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // M
auto thr_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsB = thr_copy_B.partition_S(sB);
Tensor tCrB_copy_view = thr_copy_B.retile_D(tCrB);
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N
//
// Prologue
//
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
// Make the smem tiles visible to all threads before the first smem->rmem loads
__syncthreads();
// Load A, B smem->rmem for k=0
copy(tCsA(_,_,0), tCrA_copy_view(_,_,0));
copy(tCsB(_,_,0), tCrB_copy_view(_,_,0));
//
// Mainloop
//
// Size of the k-tile's outer product mode (k)
auto K_BLOCK_MAX = size<2>(tCrA);
CUTLASS_PRAGMA_NO_UNROLL
while (k_tile_count > -1)
{
// Pipeline the outer products with a static for loop
for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
__syncthreads();
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
__syncthreads();
}
// Load A, B smem->rmem for k+1
int k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(tCsA(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next));
copy(tCsB(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next));
if (k_block == 0)
{
if (k_tile_count <= 0) {
clear(tApA);
clear(tBpB);
}
copy_if(gmem_tiled_copy_a, tApA, tAgA(_,_,_,*k_tile_iter), tArA);
copy_if(gmem_tiled_copy_b, tBpB, tBgB(_,_,_,*k_tile_iter), tBrB);
++k_tile_iter;
--k_tile_count;
}
// transform before compute
cute::transform(tCrA(_,_,k_block), TransformA{});
cute::transform(tCrB(_,_,k_block), TransformB{});
// Thread-level register gemm for k
// disambiguate gemm (shared with the namespace name)
cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum);
});
}
}
};
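// Illustrative sketch (not part of the library): host-side argument packing for this
// two-stage collective. `Collective` is an assumed alias for a fully specialized
// CollectiveMma, and `ptr_A`, `dA`, `ptr_B`, `dB`, and `problem_shape` are placeholders.
// Because Params is an alias of Arguments here, the conversion is a straight copy.
//
//   typename Collective::Arguments mainloop_args{ptr_A, dA, ptr_B, dB};
//   typename Collective::Params mainloop_params =
//       Collective::to_underlying_arguments(problem_shape, mainloop_args, /*workspace=*/nullptr);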
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/gemm/collective/sm70_mma_twostage.hpp/0
|
{
"file_path": "cutlass/include/cutlass/gemm/collective/sm70_mma_twostage.hpp",
"repo_id": "cutlass",
"token_count": 10191
}
| 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level RankK definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/rank_k_universal.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op_blas3.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric>
struct DefaultRankKComplex;
////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation
ComplexTransform TransformA,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_
> struct RankKTransposedComplexTransform {
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformA;
};
// partial specializations for HERK CUBLAS_OP_N layout (ColumnMajor)
template <>
struct RankKTransposedComplexTransform <
layout::ColumnMajor,
ComplexTransform::kNone,
BlasMode::kHermitian> {
static ComplexTransform const kTransformA = ComplexTransform::kConjugate;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
};
// partial specializations for HERK CUBLAS_OP_C layout (RowMajor + Complex conjugate)
template <>
struct RankKTransposedComplexTransform <
layout::RowMajor,
ComplexTransform::kConjugate,
BlasMode::kHermitian> {
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kConjugate;
};
}
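// Illustrative sketch (not part of the library): how the helper above resolves the
// operand transforms. For a Hermitian rank-k update with a column-major A and no
// user-requested conjugation (the CUBLAS_OP_N case), operand A carries the conjugation
// while operand B (the transposed view of A) is left untouched; the symmetric case
// simply passes the user transform through to both operands.
//
//   using HerkN = detail::RankKTransposedComplexTransform<
//       layout::ColumnMajor, ComplexTransform::kNone, BlasMode::kHermitian>;
//   static_assert(HerkN::kTransformA == ComplexTransform::kConjugate, "conjugated operand A");
//   static_assert(HerkN::kTransformB == ComplexTransform::kNone, "plain operand B");
//
//   using SyrkN = detail::RankKTransposedComplexTransform<
//       layout::ColumnMajor, ComplexTransform::kNone, BlasMode::kSymmetric>;
//   static_assert(SyrkN::kTransformA == ComplexTransform::kNone, "pass-through");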
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture complex datatype (symmetric)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRankKComplex<
ElementA, LayoutA, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, Operator, SplitKSerial, BlasMode::kSymmetric> {
static BlasMode const kBlasMode = BlasMode::kSymmetric;
/// Define the threadblock-scoped matrix multiply-accumulate (A x A^T)
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
TransformA, TransformA, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level RankK operator.
using RankKkernel = kernel::RankKUniversal<Mma, Epilogue, ThreadblockSwizzle, FillModeC>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture complex datatype (hermitian)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRankKComplex<
ElementA, LayoutA, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, Operator, SplitKSerial, BlasMode::kHermitian> {
static BlasMode const kBlasMode = BlasMode::kHermitian;
// Complex transform for input A and B matrices (function on input layout)
static ComplexTransform const kTransformA = TransformA;
using TransposedComplexTransform = detail::RankKTransposedComplexTransform<
LayoutA,
TransformA,
kBlasMode>;
// Complex transform on operandA and operandB (function of blas3 computation)
static ComplexTransform const kTransformOperandA = TransposedComplexTransform::kTransformA;
static ComplexTransform const kTransformOperandB = TransposedComplexTransform::kTransformB;
/// Define the threadblock-scoped matrix multiply-accumulate (A x A^H)
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level RankK operator.
using RankKkernel = kernel::RankKUniversal<Mma, Epilogue, ThreadblockSwizzle, FillModeC>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture complex datatype (symmetric)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRankKComplex<
ElementA, LayoutA, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, Operator, SplitKSerial, BlasMode::kSymmetric> {
static BlasMode const kBlasMode = BlasMode::kSymmetric;
/// Define the threadblock-scoped matrix multiply-accumulate (A x A^T)
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
TransformA, TransformA, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level RankK operator.
using RankKkernel = kernel::RankKUniversal<Mma, Epilogue, ThreadblockSwizzle, FillModeC>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture complex datatype (hermitian)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRankKComplex<
ElementA, LayoutA, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, Operator, SplitKSerial, BlasMode::kHermitian> {
static BlasMode const kBlasMode = BlasMode::kHermitian;
// Complex transform for input A and B matrices (function on input layout)
static ComplexTransform const kTransformA = TransformA;
using TransposedComplexTransform = detail::RankKTransposedComplexTransform<
LayoutA,
TransformA,
kBlasMode>;
// Complex transform on operandA and operandB (function of blas3 computation)
static ComplexTransform const kTransformOperandA = TransposedComplexTransform::kTransformA;
static ComplexTransform const kTransformOperandB = TransposedComplexTransform::kTransformB;
/// Define the threadblock-scoped matrix multiply-accumulate (A x A^H)
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level RankK operator.
using RankKkernel = kernel::RankKUniversal<Mma, Epilogue, ThreadblockSwizzle, FillModeC>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
|
cutlass/include/cutlass/gemm/kernel/default_rank_k_complex.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/kernel/default_rank_k_complex.h",
"repo_id": "cutlass",
"token_count": 5386
}
| 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail
{
template<typename ElementAlphaBeta, bool BetaIsZero>
struct GemvBatchedStridedEpilogueScaling
{
ElementAlphaBeta const & alpha;
ElementAlphaBeta const & beta;
CUTLASS_DEVICE
GemvBatchedStridedEpilogueScaling(ElementAlphaBeta& alpha_, ElementAlphaBeta& beta_) :
alpha(alpha_), beta(beta_)
{ }
template<typename FragmentCD, typename FragmentAccumulator>
CUTLASS_DEVICE
void operator()(FragmentAccumulator& accumulators,
FragmentCD const& fragment_C,
FragmentCD& fragment_D) const
{
using AccType = typename FragmentAccumulator::value_type;
using CDType = typename FragmentCD::value_type;
static_assert(FragmentCD::kElements == FragmentAccumulator::kElements,
"Mistmatch in fragment sizes.");
for (int i = 0; i < FragmentCD::kElements; ++i)
{
if (BetaIsZero)
{
fragment_D[i] = CDType(accumulators[i] * AccType(alpha));
}
else
{
fragment_D[i] = CDType(accumulators[i] * AccType(alpha)
+ AccType(fragment_C[i]) * AccType(beta));
}
}
}
};
}
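// Illustrative sketch (not part of the library): applying the scaling functor above
// inside a device function. The fragment types and element count (4) are assumptions
// for illustration; the functor requires matching fragment sizes.
//
//   using FragAcc = cutlass::Array<float, 4>;
//   using FragCD  = cutlass::Array<float, 4>;
//   float alpha = 2.0f, beta = 0.5f;
//   FragAcc acc;     // accumulators produced by the GEMV mainloop
//   FragCD  frag_C;  // loaded source fragment
//   FragCD  frag_D;  // destination fragment
//   detail::GemvBatchedStridedEpilogueScaling<float, /*BetaIsZero=*/false> scale(alpha, beta);
//   scale(acc, frag_C, frag_D);   // frag_D[i] = alpha * acc[i] + beta * frag_C[i]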
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemvKernel, typename ElementAlphaBeta, bool BetaIsZero=false>
CUTLASS_DEVICE void GemvBatchedStridedDevice(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
ElementAlphaBeta beta,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_C,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv;
using ThreadBlockSwizzle = typename GemvKernel::ThreadBlockSwizzle;
using EpilogueScale = detail::GemvBatchedStridedEpilogueScaling<ElementAlphaBeta, BetaIsZero>;
ThreadBlockSwizzle swizzler;
// Compute initial location in logical coordinates
BatchedGemmCoord tb_offset = swizzler.get_tile_offset();
int const batch_idx = swizzler.get_batch_idx();
// Offset to the batch
ref_A.add_pointer_offset(batch_idx*lda);
ref_B.add_pointer_offset(batch_idx*ldb);
// Construct iterators to A and B operands
typename GemvKernel::IteratorA::Params params_A(ref_A.layout());
typename GemvKernel::IteratorA iterator_A(
params_A,
ref_A.data(),
{ 1, problem_size.k() },
0,
{ 0, 0 });
typename GemvKernel::IteratorB::Params params_B(ref_B.layout());
typename GemvKernel::IteratorB iterator_B(
params_B,
ref_B.data(),
{ problem_size.k(), problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
//
// Main loop
//
// Construct thread-scoped matrix multiply
ThreadBlockGemv mma;
typename ThreadBlockGemv::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped gemv
mma(problem_size.mnk(), accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
typename GemvKernel::FragmentCD fragment_CD;
// Load C (skip if beta is zero)
if (!BetaIsZero)
{
tb_offset = swizzler.get_tile_offset();
ref_C.add_pointer_offset(batch_idx*ldc);
typename GemvKernel::IteratorCD::Params params_C(ref_C.layout());
typename GemvKernel::IteratorCD iterator_C(
params_C,
ref_C.data(),
{ 1, problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
iterator_C.load(fragment_CD);
}
// Apply alpha/beta scaling
EpilogueScale epilogue_scale(alpha, beta);
epilogue_scale(accumulators, fragment_CD, fragment_CD);
// Store D
tb_offset = swizzler.get_tile_offset();
ref_D.add_pointer_offset(batch_idx*ldd);
typename GemvKernel::IteratorCD::Params params_D(ref_D.layout());
typename GemvKernel::IteratorCD iterator_D(
params_D,
ref_D.data(),
{ 1, problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
iterator_D.store(fragment_CD);
}
template <typename GemvKernel, typename ElementAlphaBeta, bool BetaIsZero>
CUTLASS_GLOBAL void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
ElementAlphaBeta beta,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_C,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, BetaIsZero>(
problem_size, alpha, beta, ref_A, lda, ref_B, ldb, ref_C, ldc, ref_D, ldd
);
}
template <typename GemvKernel, typename ElementAlphaBeta>
CUTLASS_GLOBAL void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, true>(
problem_size, alpha, ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd
);
}
template <typename GemvKernel>
CUTLASS_GLOBAL void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
using ElementAlphaBeta = typename GemvKernel::IteratorCD::Element;
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, true>(
problem_size, ElementAlphaBeta(1), ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd
);
}
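// Illustrative sketch (not part of the library): launching the alpha/beta overload.
// `GemvKernel`, `grid`, `block`, and `stream` are placeholders; the grid and block
// shapes must be derived from GemvKernel's threadblock shape and swizzle.
//
//   GemvBatchedStrided<GemvKernel, float, /*BetaIsZero=*/false><<<grid, block, 0, stream>>>(
//       problem_size, alpha, beta, ref_A, lda, ref_B, ldb, ref_C, ldc, ref_D, ldd);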
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
|
cutlass/include/cutlass/gemm/kernel/gemv_batched_strided.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/kernel/gemv_batched_strided.h",
"repo_id": "cutlass",
"token_count": 3192
}
| 37 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/reg_reconfig.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cute/tensor.hpp"
///////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::kernel {
///////////////////////////////////////////////////////////////////////////////
template <
class ProblemShape_,
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileScheduler_
>
class GemmUniversal<
ProblemShape_,
CollectiveMainloop_,
CollectiveEpilogue_,
TileScheduler_,
cute::enable_if_t<cute::is_base_of_v<KernelCpAsyncWarpSpecializedCooperative, typename CollectiveMainloop_::DispatchPolicy::Schedule>>>
{
public:
//
// Type Aliases
//
using ProblemShape = ProblemShape_;
static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4,
"ProblemShape{} should be <M,N,K> or <M,N,K,L>");
// Mainloop derived types
using CollectiveMainloop = CollectiveMainloop_;
using TileShape = typename CollectiveMainloop::TileShape;
using TiledMma = typename CollectiveMainloop::TiledMma;
using ArchTag = typename CollectiveMainloop::ArchTag;
using ElementA = typename CollectiveMainloop::ElementA;
using StrideA = typename CollectiveMainloop::StrideA;
using ElementB = typename CollectiveMainloop::ElementB;
using StrideB = typename CollectiveMainloop::StrideB;
using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy;
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator;
using ClusterShape = typename DispatchPolicy::ClusterShape;
using MainloopArguments = typename CollectiveMainloop::Arguments;
using MainloopParams = typename CollectiveMainloop::Params;
static_assert(ArchTag::kMinComputeCapability >= 90);
// Epilogue derived types
using CollectiveEpilogue = CollectiveEpilogue_;
using ElementC = typename CollectiveEpilogue::ElementC;
using StrideC = typename CollectiveEpilogue::StrideC;
using ElementD = typename CollectiveEpilogue::ElementD;
using StrideD = typename CollectiveEpilogue::StrideD;
using EpilogueArguments = typename CollectiveEpilogue::Arguments;
using EpilogueParams = typename CollectiveEpilogue::Params;
using TileSchedulerTag = TileScheduler_;
using TileScheduler = typename detail::TileSchedulerSelector<
TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler;
using TileSchedulerArguments = typename TileScheduler::Arguments;
using TileSchedulerParams = typename TileScheduler::Params;
using GmemTiledCopyA = typename CollectiveMainloop::GmemTiledCopyA;
using GmemTiledCopyB = typename CollectiveMainloop::GmemTiledCopyB;
static_assert(cute::size(GmemTiledCopyA{}) == cute::size(GmemTiledCopyB{}), "Number of threads in A/B tiled copies must be the same");
static constexpr uint32_t NumLoadWarpGroups = cute::size(GmemTiledCopyA{}) / NumThreadsPerWarpGroup;
static constexpr uint32_t NumMmaWarpGroups = cute::size(TiledMma{}) / NumThreadsPerWarpGroup;
static constexpr uint32_t NumWarpGroups = NumLoadWarpGroups + NumMmaWarpGroups;
static_assert(NumWarpGroups == 2 || NumWarpGroups == 3, "Number of warp groups must be 2 or 3 for good performance.");
static constexpr uint32_t MaxThreadsPerBlock = NumWarpGroups * NumThreadsPerWarpGroup;
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
// Kernel level shared memory storage
struct SharedStorage {
struct TensorStorage : cute::aligned_struct<128> {
using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage;
using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage;
MainloopTensorStorage mainloop;
EpilogueTensorStorage epilogue;
} tensors;
struct PipelineStorage : cute::aligned_struct<16> {
using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage;
using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage;
alignas(16) MainloopPipelineStorage mainloop;
alignas(16) EpiLoadPipelineStorage epi_load;
} pipelines;
};
static constexpr int SharedStorageSize = sizeof(SharedStorage);
// Device side arguments
struct Arguments {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopArguments mainloop{};
EpilogueArguments epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerArguments scheduler{};
};
// Kernel entry point API
struct Params {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopParams mainloop{};
EpilogueParams epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerParams scheduler{};
};
//
// Methods
//
// Convert to underlying arguments. In this case, a simple copy for the aliased type.
static
Params
to_underlying_arguments(Arguments const& args, void* workspace) {
CUTLASS_TRACE_HOST("to_underlying_arguments():");
auto problem_shape = args.problem_shape;
if constexpr (detail::IF_SWAP_AB<CollectiveMainloop>::value) {
// swap M/N
get<0>(problem_shape) = get<1>(args.problem_shape);
get<1>(problem_shape) = get<0>(args.problem_shape);
}
auto problem_shape_MNKL = append<4>(problem_shape, 1);
// Get SM count if needed, otherwise use user supplied SM count
int sm_count = args.hw_info.sm_count;
if (sm_count <= 0) {
CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n"
" For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count.");
sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id);
}
CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count);
KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count};
TileSchedulerParams scheduler = TileScheduler::to_underlying_arguments(
problem_shape_MNKL, TileShape{}, ClusterShape{}, hw_info, args.scheduler, workspace);
return {
args.mode,
problem_shape,
CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace),
CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace),
hw_info,
scheduler
};
}
CUTLASS_HOST_DEVICE static
bool
can_implement(Arguments const& args) {
bool implementable = (args.mode == GemmUniversalMode::kGemm) or
(args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4);
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n");
return implementable;
}
implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop);
implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue);
implementable &= TileScheduler::can_implement(args.scheduler);
return implementable;
}
static
size_t
get_workspace_size(Arguments const& args) {
TileScheduler t;
return t.template get_workspace_size<ProblemShape, ElementAccumulator>(
args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups);
}
static
cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
TileScheduler t;
return t.template initialize_workspace<ProblemShape, ElementAccumulator>(
args.scheduler, workspace, stream, args.problem_shape, args.hw_info, NumMmaWarpGroups);
}
// Computes the kernel launch grid shape based on runtime parameters
static dim3
get_grid_shape(Params const& params) {
// Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently
TileSchedulerArguments args{};
if constexpr (!std::is_const_v<decltype(args.max_swizzle_size)>) {
args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_;
}
return TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args);
}
static dim3
get_block_shape() {
return dim3(MaxThreadsPerBlock, 1, 1);
}
CUTLASS_DEVICE
void
operator()(Params const& params, char* smem_buf) {
using namespace cute;
using X = Underscore;
// Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a.
#if ! defined(__CUDA_ARCH_FEAT_SM90_ALL)
printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n");
#else
static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
/* In the Cooperative kernel, one or multiple Consumers collaborate on the same tile */
enum class WarpGroupRole {
Producer = 0,
Consumer = 1,
};
// Kernel level shared memory storage
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf);
int thread_idx = int(threadIdx.x);
int mma_thread_idx = thread_idx % size(TiledMma{});
int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup;
int warp_group_idx = canonical_warp_group_idx();
CUTLASS_ASSERT(warp_group_idx < NumWarpGroups);
WarpGroupRole warp_group_role = warp_group_idx < NumLoadWarpGroups ? WarpGroupRole::Producer : WarpGroupRole::Consumer;
// Mainloop Load pipeline
using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline;
typename MainloopPipeline::Params mainloop_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
mainloop_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup;
mainloop_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup;
MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params);
// Epilogue Load pipeline
using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline;
typename EpiLoadPipeline::Params epi_load_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer;
}
epi_load_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup;
epi_load_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup;
EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params);
// Epilogue Store pipeline
using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline;
typename EpiStorePipeline::Params epi_store_pipeline_params;
epi_store_pipeline_params.always_wait = true;
EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params);
// Initialize starting pipeline states for the collectives
// Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding)
typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state;
typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state;
// For the DMA Load (producer) we start with an opposite phase
// i.e., we skip all waits since we know that the buffer is indeed empty
PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>();
PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>();
PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>();
// Separate out problem shape for convenience
// Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{});
auto M = get<0>(problem_shape_MNKL);
auto N = get<1>(problem_shape_MNKL);
auto K = get<2>(problem_shape_MNKL);
auto L = get<3>(problem_shape_MNKL);
// Represent the full tensors
Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); //(m,k,l)
Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); //(n,k,l)
// Get the appropriate blocks for this thread block -- potential for thread block locality
TiledMma tiled_mma;
auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K)
// Make tiled views, defer the slice
Tensor gA_mkl = local_tile(mA_mkl, blk_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l)
Tensor gB_nkl = local_tile(mB_nkl, blk_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l)
TileScheduler scheduler{params.scheduler};
auto work_tile_info = scheduler.get_current_work();
// In a warp specialized kernel, collectives expose data movement and compute operations separately
CollectiveMainloop collective_mainloop;
CollectiveEpilogue collective_epilogue{params.epilogue, shared_storage.tensors.epilogue};
// Wait for all threads in the thread block
__syncthreads();
if (warp_group_role == WarpGroupRole::Producer) {
while (work_tile_info.is_valid()) {
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
// Slice with our work tile coordinates to construct mainloop tensor views
Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k)
// Get the number of K tiles to compute for this work as well as the starting K tile offset of the work.
auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape);
auto work_k_tile_start = TileScheduler::get_work_k_tile_start(work_tile_info);
auto k_tile_iter = cute::make_coord_iterator(idx2crd(work_k_tile_start, shape<2>(gA)), shape<2>(gA));
// Compute tile residues for predication
auto m_max_coord = M - size<0>(gA) * get<0>(blk_coord); // M - BLK_M * m_coord
auto n_max_coord = N - size<0>(gB) * get<1>(blk_coord); // N - BLK_N * n_coord
auto k_residue = K - size<1>(gA) * size<2>(gA); // K - BLK_K * k_coord_max
auto residue_mnk = make_tuple(m_max_coord, n_max_coord, k_residue);
collective_mainloop.load(
mainloop_pipeline,
mainloop_pipe_producer_state,
gA,
gB,
k_tile_iter, work_k_tile_count,
residue_mnk,
thread_idx,
shared_storage.tensors.mainloop
);
// Update starting pipeline state for the next tile
mainloop_pipe_producer_state.advance(work_k_tile_count);
if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler) &&
collective_epilogue.is_producer_load_needed()) {
epi_load_pipe_producer_state =
collective_epilogue.load(
epi_load_pipeline,
epi_load_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
tiled_mma,
warp_group_thread_idx,
shared_storage.tensors.epilogue
);
}
// Get next work tile
work_tile_info = fetch_next_work(work_tile_info, scheduler);
} // Scheduler work fetch loop
// Make sure all Consumer Warp Groups have been waited upon
collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state);
if (collective_epilogue.is_producer_load_needed()) {
collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state);
}
} // Producer Warp Group End
else if (warp_group_role == WarpGroupRole::Consumer) {
bool do_store_tail = false;
while (work_tile_info.is_valid()) {
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape);
// Allocate the accumulators for the (M,N) blk_shape
//
// MSVC CTAD breaks if we say "Tensor" here, so we use "auto" instead.
auto accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N)
collective_mainloop.mma(
mainloop_pipeline,
mainloop_pipe_consumer_state,
accumulators,
work_k_tile_count,
mma_thread_idx,
shared_storage.tensors.mainloop,
params.mainloop
);
// Make sure the math instructions are done and free buffers before entering the epilogue
collective_mainloop.mma_tail(
mainloop_pipeline,
mainloop_pipe_consumer_state,
work_k_tile_count
);
// Update starting mainloop pipeline state for the next tile
mainloop_pipe_consumer_state.advance(work_k_tile_count);
// Index of warp group within consumer warp groups
int consumer_warp_group_idx = canonical_warp_group_idx() - NumLoadWarpGroups;
// Perform reduction across splits, if needed
TileScheduler::fixup(
params.scheduler, work_tile_info, accumulators, NumMmaWarpGroups, consumer_warp_group_idx);
if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) {
// Epilogue and write to gD
auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] =
collective_epilogue.store(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
accumulators,
tiled_mma,
mma_thread_idx,
shared_storage.tensors.epilogue
);
epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next;
epi_store_pipe_producer_state = epi_store_pipe_producer_state_next;
do_store_tail = true;
}
// Get next work tile
work_tile_info = fetch_next_work(work_tile_info, scheduler);
} // Scheduler work fetch loop
if (do_store_tail) {
collective_epilogue.store_tail(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state
);
}
} // Consumer Warp Groups End
#endif
}
private:
// Kernel helper function to get next work unit
CUTLASS_DEVICE
typename TileScheduler::WorkTileInfo
fetch_next_work(
typename TileScheduler::WorkTileInfo& work_tile_info,
TileScheduler& scheduler) const {
// Check whether we should continue on with the current work unit. If this is the case,
// the work unit will have been updated in continue_current_work to reflect the new
// tile to be computed.
if (scheduler.continue_current_work(work_tile_info)) {
return work_tile_info;
}
// Get next work tile
scheduler.advance_to_next_work();
return scheduler.get_current_work();
}
};
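// Illustrative sketch (not part of the library): the host-side launch sequence this
// kernel type expects, as typically driven by GemmUniversalAdapter. `Kernel` is an
// assumed alias for a fully specialized GemmUniversal; `args`, `workspace`, and
// `stream` are placeholders.
//
//   typename Kernel::Params params = Kernel::to_underlying_arguments(args, workspace);
//   dim3 grid  = Kernel::get_grid_shape(params);
//   dim3 block = Kernel::get_block_shape();
//   int  smem_bytes = Kernel::SharedStorageSize;
//   // Large dynamic smem requests also need cudaFuncAttributeMaxDynamicSharedMemorySize raised.
//   cutlass::device_kernel<Kernel><<<grid, block, smem_bytes, stream>>>(params);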
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel
|
cutlass/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_cooperative.hpp/0
|
{
"file_path": "cutlass/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_cooperative.hpp",
"repo_id": "cutlass",
"token_count": 8859
}
| 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles conventional layouts for IDP4A
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_
>
struct Mma<
Shape_,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int32_t,
LayoutC_,
arch::OpMultiplyAdd,
bool> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = int8_t;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = layout::RowMajor;
/// Data type of operand B
using ElementB = int8_t;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = layout::ColumnMajor;
/// Element type of operand C
using ElementC = int32_t;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Underlying matrix multiply operator (concept: arch::Mma)
// Use 1x1x4 IDP4A sequence for bulk of computation
using ArchMmaOperator = arch::Mma<
gemm::GemmShape<1,1,4>,
1,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
arch::OpMultiplyAdd>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementC, LayoutC> d(
reinterpret_cast<ElementC *>(&D), LayoutC::packed({ Shape::kM, Shape::kN }));
// Copy accumulators
D = C;
    // Use 1x1x4 IDP4A sequence for bulk of computation
ArchMmaOperator mma;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / ArchMmaOperator::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
MatrixCoord mn(m, n);
Array<int8_t, 4> const *ptr_A = reinterpret_cast<Array<int8_t, 4> const *>(&A);
Array<int8_t, 4> const *ptr_B = reinterpret_cast<Array<int8_t, 4> const *>(&B);
Array<int32_t, 1> tmp = reinterpret_cast<Array<int32_t, 1> &>(d.at(mn));
mma(
tmp,
ptr_A[m * Shape::kK / ArchMmaOperator::Shape::kK + k],
ptr_B[n * Shape::kK / ArchMmaOperator::Shape::kK + k],
tmp);
d.at(mn) = reinterpret_cast<int32_t &>(tmp);
}
}
}
}
};
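/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only, not part of the original header). The tile shape below is an
// arbitrary example; any GemmShape whose K dimension is a multiple of 4 matches this
// specialization.
//
//   using ThreadMma = cutlass::gemm::thread::Mma<
//     cutlass::gemm::GemmShape<4, 4, 16>,            // thread-level tile M x N x K
//     int8_t,  cutlass::layout::RowMajor,            // A operand
//     int8_t,  cutlass::layout::ColumnMajor,         // B operand
//     int32_t, cutlass::layout::RowMajor,            // C accumulator
//     cutlass::arch::OpMultiplyAdd>;
//
//   ThreadMma mma;
//   ThreadMma::FragmentA frag_A;   // 4x16 int8_t values held in registers
//   ThreadMma::FragmentB frag_B;   // 16x4 int8_t values held in registers
//   ThreadMma::FragmentC accum;    // 4x4 int32_t accumulators
//   accum.clear();
//   mma(accum, frag_A, frag_B, accum);               // accum = frag_A * frag_B + accum
//
/////////////////////////////////////////////////////////////////////////////////////////////////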
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles conventional layouts for IDP4A
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_
>
struct Mma<
Shape_,
int8_t,
layout::ColumnMajor,
int8_t,
layout::RowMajor,
int32_t,
LayoutC_,
arch::OpMultiplyAdd,
int8_t> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = int8_t;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = layout::ColumnMajor;
/// Data type of operand B
using ElementB = int8_t;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = layout::RowMajor;
/// Element type of operand C
using ElementC = int32_t;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Underlying matrix multiply operator (concept: arch::Mma)
/// Use 1x1x4 IDP4A sequence for bulk of computation
using ArchMmaOperator = arch::Mma<
gemm::GemmShape<1,1,4>,
1,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
arch::OpMultiplyAdd>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementC, LayoutC> d(
reinterpret_cast<ElementC *>(&D), LayoutC::packed({ Shape::kM, Shape::kN }));
// Copy accumulators
D = C;
    // Underlying matrix multiply operator
ArchMmaOperator mma;
Array<int8_t, 4> const *ptr_A = reinterpret_cast<Array<int8_t, 4> const *>(&A);
Array<int8_t, 4> const *ptr_B = reinterpret_cast<Array<int8_t, 4> const *>(&B);
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / ArchMmaOperator::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
MatrixCoord mn(m, n);
Array<int32_t, 1> tmp = reinterpret_cast<Array<int32_t, 1> &>(d.at(mn));
mma(
tmp,
ptr_A[m + k * Shape::kM],
ptr_B[n + k * Shape::kN],
tmp);
d.at(mn) = reinterpret_cast<int32_t &>(tmp);
}
}
}
}
};
} // namespace thread
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/gemm/thread/mma_sm61.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/thread/mma_sm61.h",
"repo_id": "cutlass",
"token_count": 2984
}
| 39 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
  /// Layout of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Transformation applied to A operand
typename TransformA_ = NumericArrayConverter<
typename SmemIteratorA_::Element,
typename IteratorA_::Element,
IteratorA_::Fragment::kElements>,
  /// Transformation applied to B operand
typename TransformB_ = NumericArrayConverter<
typename SmemIteratorB_::Element,
typename IteratorB_::Element,
IteratorB_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool
>
class MmaPipelined : public MmaBase<Shape_, Policy_, 2> {
public:
///< Base class
using Base = MmaBase<Shape_, Policy_, 2>;
using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory
using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using Policy = Policy_; ///< Policy describing tuning details
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
  // Statically assert that kStages for MmaPipelined is two (double-buffered pipeline)
static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2");
protected:
//
// Data members
//
/// Warp-level MMA operator
Operator warp_mma;
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
  /// Transformation applied to A fragment
  TransformA transform_A_;
  /// Transformation applied to B fragment
  TransformB transform_B_;
/// Shared memory write stage index
int smem_write_stage_idx;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaPipelined(
typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx, ///< ID of each thread within a warp
TransformA transform_A = TransformA(), ///< transformation applied to A fragment
TransformB transform_B = TransformB() ///< transformation applied to B fragment
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx),
transform_A_(transform_A),
transform_B_(transform_B),
smem_write_stage_idx(0)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
/// Advance shared memory write-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_write_stage()
{
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
smem_write_stage_idx ^= 1;
}
/// Advance shared memory read- and write-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_stages()
{
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
// wrap write stage
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
else
{
// wrap read stage
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
}
smem_write_stage_idx ^= 1;
}
/// GEMM prologue. Bootstrap the global->shared memory pipeline by fetching
/// the global fragments needed by the first kStages-1 threadblock mainloop iterations
CUTLASS_DEVICE
void prologue(
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory
int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining
{
    // The last k-block is loaded in the prologue
// Load A fragment from global A
FragmentA tb_frag_A;
tb_frag_A.clear();
iterator_A.load(tb_frag_A);
++iterator_A;
// Load B fragment from global B
FragmentB tb_frag_B;
tb_frag_B.clear();
iterator_B.load(tb_frag_B);
++iterator_B;
// Store A and B fragments to shared
this->smem_iterator_A_.store(transform_A_(tb_frag_A));
this->smem_iterator_B_.store(transform_B_(tb_frag_B));
// Advance write stage
advance_smem_write_stage();
}
/// Wait until we have at least one completed global fetch stage
CUTLASS_DEVICE
void gmem_wait()
{
__syncthreads();
}
/// Perform the specified number of threadblock mainloop iterations of matrix
/// multiply-accumulate. Assumes prologue has been initiated.
CUTLASS_DEVICE
void gemm_iters(
int gemm_k_iterations, ///< number of threadblock mainloop iterations
FragmentC &accum, ///< [in|out] accumulator tile
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B) ///< [in|out] iterator over B operand in global memory
{
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_A[2];
WarpFragmentB warp_frag_B[2];
// Load A fragment from shared A
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_A[0]);
++this->warp_tile_iterator_A_;
// Load B fragment from shared B
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_B_.load(warp_frag_B[0]);
++this->warp_tile_iterator_B_;
// Pair of fragments used to overlap global memory loads and math instructions;
FragmentA tb_frag_A;
FragmentB tb_frag_B;
// Avoid reading out of bounds
iterator_A.clear_mask(gemm_k_iterations <= 1);
iterator_B.clear_mask(gemm_k_iterations <= 1);
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
        // Load warp-level tiles from shared memory, wrapping to the k offset if this is the last group
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(transform_A_(tb_frag_A));
this->smem_iterator_B_.store(transform_B_(tb_frag_B));
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Advance smem read and write stages
advance_smem_stages();
}
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
// Load fragment from global A
tb_frag_A.clear();
iterator_A.load(tb_frag_A);
++iterator_A;
// Load fragment from global B
tb_frag_B.clear();
iterator_B.load(tb_frag_B);
++iterator_B;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A.clear_mask(gemm_k_iterations <= 2);
iterator_B.clear_mask(gemm_k_iterations <= 2);
}
warp_mma(
accum,
warp_frag_A[warp_mma_k % 2],
warp_frag_B[warp_mma_k % 2],
accum);
}
}
}
/// Prepares the class for another prologue.
CUTLASS_DEVICE
void wind_down()
{
    // First, increment the remaining warp tile iterators to catch them up with the write stage.
#pragma unroll
for (int warp_mma_k = 1; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k)
{
this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k);
this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
}
// If we bumped the read iterators to the end of the circular buffer, wrap them around to
// align them with the write iterators
if (smem_write_stage_idx == 0)
{
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations, ///< number of iterations of the mainloop
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const &src_accum) ///< source accumulator tile
{
// Prologue
prologue(iterator_A, iterator_B, gemm_k_iterations);
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Perform the MAC-iterations
gemm_iters(gemm_k_iterations, accum, iterator_A, iterator_B);
}
};
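/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only; IteratorA/IteratorB, the policy, and the index values shown
// here are assumptions supplied by an enclosing kernel, not by this header). A threadblock-scoped
// GEMM kernel typically drives MmaPipelined as follows:
//
//   using Mma = cutlass::gemm::threadblock::MmaPipelined<
//     ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB,
//     ElementC, LayoutC, Policy>;
//
//   __shared__ typename Mma::SharedStorage shared_storage;
//
//   Mma mma(shared_storage, thread_idx, warp_idx, lane_idx);
//   typename Mma::FragmentC accumulators;
//   accumulators.clear();
//
//   // Runs the prologue, waits for the first global fetch stage, then executes the mainloop
//   mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
//
/////////////////////////////////////////////////////////////////////////////////////////////////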
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/gemm/threadblock/mma_pipelined.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/threadblock/mma_pipelined.h",
"repo_id": "cutlass",
"token_count": 5846
}
| 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief This defines a "fragment" iterator for visiting the fragments of a warp tile
      that participate in one warp-level mma operation.
      Typically, this is used to access the accumulator tile/fragment of a warp-level mma operation.
      The accumulator tile is then partitioned into smaller tiles/fragments that can be fed into
      the next warp-level mma operation.
      This iterator is necessary to accomplish warp-level mma fusion, in which the accumulator tile is
      reused as the multiplicand tile of the next mma.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of the accumulation tile shape (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on the fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator;
// Partial specialization for col-major accumulator tile
template <
/// Shape of warp tile to load (concept: MatrixShape)
typename Shape_,
/// Shape of the warp accumulation tile (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, ElementAccumulator_, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OutputOp_> {
public:
/// Shape of warp tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of the warp accumulation tile (concept: MatrixShape)
using AccumulatorShape = AccumulatorShape_;
/// KBlocks columns to compute residual
static int const kKBlockColumn = KBlocksColumn_;
/// Accumulator Element type
using ElementAccumulator = ElementAccumulator_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Output operation on fragment
using OutputOp = OutputOp_;
/// Number of participating threads
static int const kThreads = 32;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(
AccumulatorShape::kRow == Shape::kRow,
"Rows of Warp Accumulator must be the same as rows of warp");
static_assert(
!(AccumulatorShape::kColumn % Shape::kColumn),
"Shape of Warp Accumulator must be divisible by warp shape.");
static_assert(
!(kKBlockColumn % Shape::kColumn),
"KBlock size must be divisible by warp shape.");
/// Number of times this iterator can be incremented
static int const kIterations = AccumulatorShape::kCount / Shape::kCount;
};
private:
static int const kElementsPerAccess = InstructionShape::kM * InstructionShape::kN / kThreads;
/// Number of mma operations performed by a warp
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
/// Number of mma operations performed by the entire accumulator
using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM,
AccumulatorShape::kColumn / InstructionShape::kN>;
/// Number of K iterations
static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn;
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn;
static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
static int const kResidualIndex = kResidualColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<Element, Shape::kCount / kThreads>;
/// Accumulator Fragment object
using AccumulatorFragment = Array<ElementAccumulator, AccumulatorShape::kCount / kThreads>;
/// Scale Bias Element Type
using ElementScaleBias = typename OutputOp::ElementCompute;
/// Scale Bias Fragment object
using ScaleBiasFragment = Array<ElementScaleBias, InstructionShape::kM * InstructionShape::kK / kThreads>;
private:
/// Internal access type
using AccessType = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentAccessType = Array<Element, kElementsPerAccess>;
using ScaleBiasAccessType = Array<ElementScaleBias, kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
/// Used to access residual tile first
bool is_residual_tile_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator(AccumulatorFragment const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0), is_residual_tile_(true) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(is_residual_tile_ && index_ >= kKBlockColumnIterations) {
index_ = index_ - kKBlockColumnIterations + kResidualIndex;
is_residual_tile_ = false;
}
}
/// Increments
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator++() {
add_offset(1);
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator--() {
add_offset(-1);
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
int index = index_ * MmaIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; n++) {
for (int m = 0; m < MmaIterations::kRow; m++) {
int accumulator_access_offset =
n * AccumulatorIterations::kRow + m + index;
frag_ptr[m * MmaIterations::kColumn + n].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[m * MmaIterations::kColumn + n] = output_op(accumulators_[accumulator_access_offset]);
}
}
}
/// Loads a fragment from the referenced part of the accumulator tile
/// Then apply per-channel scale and bias
CUTLASS_HOST_DEVICE
void load(Fragment &frag, ScaleBiasFragment &scale,
ScaleBiasFragment &bias, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
ScaleBiasAccessType * scale_ptr = reinterpret_cast<ScaleBiasAccessType *>(&scale);
ScaleBiasAccessType * bias_ptr = reinterpret_cast<ScaleBiasAccessType *>(&bias);
int index = index_ * MmaIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; n++) {
for (int m = 0; m < MmaIterations::kRow; m++) {
int accumulator_access_offset =
n * AccumulatorIterations::kRow + m + index;
frag_ptr[m * MmaIterations::kColumn + n].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[m * MmaIterations::kColumn + n] =
output_op(accumulators_[accumulator_access_offset],
scale_ptr[n] /*scale*/, bias_ptr[n] /*bias*/);
}
}
}
};
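////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only; FragmentIterator, accum0/accum1, frag_B1, warp_mma1 and
// output_op are placeholder names for types and objects an enclosing fused-mma kernel would
// supply, not definitions made by this header):
//
//   FragmentIterator frag_iterator(accum0);            // accum0: accumulator of the first mma
//   typename FragmentIterator::Fragment frag_A1;
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int k = 0; k < FragmentIterator::Policy::kIterations; ++k) {
//     frag_iterator.load(frag_A1, output_op);          // apply the elementwise epilogue
//     ++frag_iterator;
//     warp_mma1(accum1, frag_A1, frag_B1, accum1);     // feed as the A operand of the second mma
//   }
//
////////////////////////////////////////////////////////////////////////////////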
// Partial specialization for row-major accumulator tile
template <
/// Shape of warp tile to load (concept: MatrixShape)
typename Shape_,
/// Shape of the warp accumulation tile (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, ElementAccumulator_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, OutputOp_> {
public:
/// Shape of warp tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of the warp accumulation tile (concept: MatrixShape)
using AccumulatorShape = AccumulatorShape_;
/// KBlocks columns to compute residual
static int const kKBlockColumn = KBlocksColumn_;
/// Accumulator Element type
using ElementAccumulator = ElementAccumulator_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Output operation on fragment
using OutputOp = OutputOp_;
/// Number of participating threads
static int const kThreads = 32;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(
AccumulatorShape::kRow == Shape::kRow,
"Rows of Warp Accumulator must be the same as rows of warp");
static_assert(
!(AccumulatorShape::kColumn % Shape::kColumn),
"Shape of Warp Accumulator must be divisible by warp shape.");
static_assert(
!(kKBlockColumn % Shape::kColumn),
"KBlock size must be divisible by warp shape.");
/// Number of times this iterator can be incremented
static int const kIterations = AccumulatorShape::kCount / Shape::kCount;
};
private:
static int const kRowsPerIteration = 8;
static int const kColumnsPerIteration = 16;
static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kN / kThreads;
static int const kElementsPerAccess = kRowsPerIteration * kColumnsPerIteration / kThreads;
static int const kIterationsPerAccess = kElementsPerAccess / kElementsPerIteration;
// Number of iterations per actual instruction
static int const kIterationsPerInstruction = InstructionShape::kM / kRowsPerIteration;
static int const kAccessStride = kIterationsPerInstruction;
/// Number of mma operations performed by a warp
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
/// Number of mma operations performed by the entire accumulator
using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM,
AccumulatorShape::kColumn / InstructionShape::kN>;
/// Number of Accesses in a warp
using AccessIterations = MatrixShape<MmaIterations::kRow * kIterationsPerInstruction,
MmaIterations::kColumn / kIterationsPerAccess>;
/// Number of K iterations
static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn;
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn;
static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn;
static int const kResidualIndex = kResidualColumn / Shape::kColumn;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<Element, Shape::kCount / kThreads>;
/// Accumulator Fragment object
using AccumulatorFragment = Array<ElementAccumulator, AccumulatorShape::kCount / kThreads>;
/// Scale Bias Element Type
using ElementScaleBias = typename OutputOp::ElementCompute;
/// Scale Bias Fragment object
using ScaleBiasFragment = Array<ElementScaleBias, InstructionShape::kM * InstructionShape::kK / kThreads>;
private:
/// Internal access type
using AccessType = Array<ElementAccumulator, kElementsPerIteration>;
using FragmentAccessType = Array<Element, kElementsPerIteration>;
using ScaleBiasAccessType = Array<ElementScaleBias, kElementsPerIteration>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
/// Used to access residual tile first
bool is_residual_tile_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator(AccumulatorFragment const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0), is_residual_tile_(true) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(is_residual_tile_ && index_ >= kKBlockColumnIterations) {
index_ = index_ - kKBlockColumnIterations + kResidualIndex;
is_residual_tile_ = false;
}
}
/// Increments
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator++() {
add_offset(1);
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator--() {
add_offset(-1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
index_ = idx;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
int index = index_ * AccessIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < AccessIterations::kCount; i++) {
int accumulator_access_offset = index / AccessIterations::kCount * (MmaIterations::kColumn * kIterationsPerInstruction) +
(index % AccessIterations::kCount) / (AccessIterations::kColumn * kIterationsPerInstruction) *
AccumulatorIterations::kColumn * kIterationsPerInstruction +
(index % (AccessIterations::kColumn * kIterationsPerInstruction)) / kIterationsPerInstruction *
(kIterationsPerInstruction * kIterationsPerAccess) +
(index % kIterationsPerInstruction);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kIterationsPerAccess; j++) {
frag_ptr[i*kIterationsPerAccess + j].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[i*kIterationsPerAccess + j] = output_op(accumulators_[accumulator_access_offset + j * kAccessStride]);
}
index++;
}
}
/// Loads a fragment from the referenced part of the accumulator tile
/// Then apply per-channel scale and bias
CUTLASS_HOST_DEVICE
void load(Fragment &frag, ScaleBiasFragment &scale,
ScaleBiasFragment & bias, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
ScaleBiasAccessType * scale_ptr = reinterpret_cast<ScaleBiasAccessType *>(&scale);
ScaleBiasAccessType * bias_ptr = reinterpret_cast<ScaleBiasAccessType *>(&bias);
int index = index_ * AccessIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < AccessIterations::kCount; i++) {
int accumulator_access_offset = index / AccessIterations::kCount * (MmaIterations::kColumn * kIterationsPerInstruction) +
(index % AccessIterations::kCount) / (AccessIterations::kColumn * kIterationsPerInstruction) *
AccumulatorIterations::kColumn * kIterationsPerInstruction +
(index % (AccessIterations::kColumn * kIterationsPerInstruction)) / kIterationsPerInstruction *
(kIterationsPerInstruction * kIterationsPerAccess) +
(index % kIterationsPerInstruction);
int scale_bias_offset = (index
% (kIterationsPerInstruction * AccessIterations::kColumn))
* kIterationsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kIterationsPerAccess; j++) {
frag_ptr[i*kIterationsPerAccess + j].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[i*kIterationsPerAccess + j] = output_op(
accumulators_[accumulator_access_offset + j * kAccessStride],
scale_ptr[scale_bias_offset + j], bias_ptr[scale_bias_offset + j]);
}
index++;
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h/0
|
{
"file_path": "cutlass/include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h",
"repo_id": "cutlass",
"token_count": 7309
}
| 41 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a class for using IEEE half-precision floating-point types in host or
device code.
*/
#pragma once
#ifndef CUTLASS_ENABLE_F16C
#define CUTLASS_ENABLE_F16C 0
#endif
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
// F16C extensions are not meaningful when compiling for NVRTC which only accommodates device code.
#undef CUTLASS_ENABLE_F16C
#define CUTLASS_ENABLE_F16C 0
#else
//
// Standard Library headers belong here to avoid conflicts with NVRTC.
//
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
#include <cuda_fp16.h>
#include "cutlass/cutlass.h"
#include "cutlass/float8.h"
#include "cutlass/platform/platform.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// Optionally target F16C extensions to accelerate half-precision conversion.
#if !defined(__CUDA_ARCH__) && (CUTLASS_ENABLE_F16C)
#if defined(_MSC_VER)
#include <immintrin.h>
#if defined(__i386__) || defined(__x86_64__)
#include <intrin.h>
#endif
#define F16C_ROUND_NEAREST 0
#if !defined(__CUDA_ARCH__)
extern __inline float _cvtsh_ss (unsigned short __S) {
__m128i packed;
std::memcpy(&packed, &__S, sizeof(__S));
__m128 result = _mm_cvtph_ps(packed);
float flt;
std::memcpy(&flt, &result, sizeof(flt));
return flt;
}
__inline unsigned short _cvtss_sh (float __F, const int) {
__m128 packed;
std::memcpy(&packed, &__F, sizeof(__F));
__m128i result = _mm_cvtps_ph(packed, F16C_ROUND_NEAREST);
unsigned short u;
std::memcpy(&u, &result, sizeof(u));
return u;
}
#endif
#else
// Linux
#include <x86intrin.h>
#if defined(__i386__) || defined(__x86_64__)
#include <cpuid.h>
#endif
#define F16C_ROUND_NEAREST (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC)
#endif // _MSC_VER
class CpuId {
bool f16c_enabled;
CpuId() {
#if defined(__i386__) || defined(__x86_64__)
#if defined(_MSC_VER)
int exx[4];
__cpuid (exx, 1);
f16c_enabled = exx[2] & 0x20000000;
#else
// GCC / Clang
int eax, ebx, ecx, edx;
__cpuid (1 , eax, ebx, ecx, edx);
f16c_enabled = ecx & 0x20000000;
#endif
#else
// Arm / PowerPC etc.
f16c_enabled = false;
#endif
}
public:
bool is_f16c_supported() const {
return f16c_enabled;
}
static const CpuId& instance() {
static CpuId cpu;
return cpu;
}
};
#endif // !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// IEEE half-precision floating-point type
struct alignas(2) half_t {
//
// Data members
//
/// Storage type
uint16_t storage;
//
// Static conversion operators
//
/// Constructs from an unsigned short
CUTLASS_HOST_DEVICE
static half_t bitcast(uint16_t x) {
half_t h;
h.storage = x;
return h;
}
/// FP32 -> FP16 conversion - rounds to nearest even
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 530)
// Avoid inlining in device code if no hardware support
__device__ __noinline__
#else
CUTLASS_HOST_DEVICE
#endif
static half_t convert(float const& flt) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__float2half_rn(flt));
#else
#if !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
if( CpuId::instance().is_f16c_supported() ) {
unsigned short u = _cvtss_sh(flt, F16C_ROUND_NEAREST);
return bitcast(u);
}
#endif
// software implementation rounds toward nearest even
unsigned s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<unsigned const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
uint16_t sign = uint16_t((s >> 16) & 0x8000);
int16_t exp = uint16_t(((s >> 23) & 0xff) - 127);
int mantissa = s & 0x7fffff;
uint16_t u = 0;
if ((s & 0x7fffffff) == 0) {
// sign-preserving zero
return bitcast(sign);
}
if (exp > 15) {
if (exp == 128 && mantissa) {
// not a number
u = 0x7fff;
} else {
// overflow to infinity
u = sign | 0x7c00;
}
return bitcast(u);
}
int sticky_bit = 0;
if (exp >= -14) {
// normal fp32 to normal fp16
exp = uint16_t(exp + uint16_t(15));
u = uint16_t(((exp & 0x1f) << 10));
u = uint16_t(u | (mantissa >> 13));
} else {
      // normal single-precision to subnormal half-precision representation
int rshift = (-14 - exp);
if (rshift < 32) {
mantissa |= (1 << 23);
sticky_bit = ((mantissa & ((1 << rshift) - 1)) != 0);
mantissa = (mantissa >> rshift);
u = (uint16_t(mantissa >> 13) & 0x3ff);
} else {
mantissa = 0;
u = 0;
}
}
// round to nearest even
int round_bit = ((mantissa >> 12) & 1);
sticky_bit |= ((mantissa & ((1 << 12) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && (u & 1))) {
u = uint16_t(u + 1);
}
u |= sign;
return bitcast(u);
#endif
}
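  // Worked example of the software path above (illustrative): converting 1.5f, whose IEEE-754
  // bits are 0x3fc00000, gives sign = 0, unbiased exponent = 0 and mantissa = 0x400000.
  // Re-biasing yields exp = 15, so u = (15 << 10) | (0x400000 >> 13) = 0x3e00; the round and
  // sticky bits are both zero, and 0x3e00 is exactly the binary16 encoding of 1.5.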
  /// int -> FP16 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static half_t convert(int const& n) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__int2half_rn(n));
#else
return convert(float(n));
#endif
}
  /// unsigned -> FP16 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static half_t convert(unsigned const& n) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__uint2half_rn(n));
#else
return convert(float(n));
#endif
}
/// Converts a half-precision value stored as a uint16_t to a float
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 530)
// Avoid inlining in device code if no hardware support
__device__ __noinline__
#else
CUTLASS_HOST_DEVICE
#endif
static float convert(half_t const& x) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __half2float(x.to_half());
#else
#if !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
if( CpuId::instance().is_f16c_supported() ) {
unsigned short u = x.storage;
return _cvtsh_ss(u);
}
#endif
uint16_t const &h = x.storage;
uint32_t sign = ((h >> 15) & 1);
uint32_t exp = ((h >> 10) & 0x1f);
uint32_t mantissa = (h & 0x3ff);
unsigned f = 0;
if (exp > 0 && exp < 31) {
// normal
exp += 112;
f = (sign << 31) | (exp << 23) | (mantissa << 13);
} else if (exp == 0) {
if (mantissa) {
// subnormal
exp += 113;
while ((mantissa & (1 << 10)) == 0) {
mantissa <<= 1;
exp--;
}
mantissa &= 0x3ff;
f = (sign << 31) | (exp << 23) | (mantissa << 13);
} else {
// sign-preserving zero
f = (sign << 31);
}
} else if (exp == 31) {
if (mantissa) {
f = 0x7fffffff; // not a number
} else {
f = (0xff << 23) | (sign << 31); // inf
}
}
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const&>(f);
#else
float flt;
std::memcpy(&flt, &f, sizeof(flt));
return flt;
#endif
#endif
}
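  // Worked example of the software path above (illustrative): the half encoding 0x3c00 has
  // sign = 0, biased exponent = 15 and mantissa = 0, so exp + 112 = 127 and
  // f = (127 << 23) = 0x3f800000, which is exactly 1.0f.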
//
// Methods
//
/// Default constructor
half_t() = default;
/// Reinterpret cast from CUDA's half type
CUTLASS_HOST_DEVICE
explicit half_t(half const & x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint16_t const &>(x);
#else
__half_raw raw(x);
std::memcpy(&storage, &raw.x, sizeof(storage));
#endif
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit half_t(float x) {
storage = convert(x).storage;
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit half_t(double x): half_t(float(x)) {
}
/// float_e4m3_t conversion
CUTLASS_HOST_DEVICE
explicit half_t(float_e4m3_t x): half_t(float(x)) {
}
/// float_e5m2_t conversion
CUTLASS_HOST_DEVICE
explicit half_t(float_e5m2_t x): half_t(float(x)) {
}
/// Integer conversion - round to nearest even
CUTLASS_HOST_DEVICE
explicit half_t(int x) {
storage = convert(x).storage;
}
  /// Integer conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
explicit half_t(unsigned x) {
storage = convert(x).storage;
}
/// Assignment
CUTLASS_HOST_DEVICE
half_t & operator=(half const &x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint16_t const &>(x);
#else
__half_raw raw(x);
std::memcpy(&storage, &raw.x, sizeof(storage));
#endif
return *this;
}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
return convert(*this);
}
  /// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(convert(*this));
}
  /// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(convert(*this));
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
return (convert(*this) != 0.0f);
}
/// Bitcasts to CUDA's half type
CUTLASS_HOST_DEVICE
half to_half() const {
#if defined(__CUDA_ARCH__)
return reinterpret_cast<half const &>(storage);
#else
__half_raw raw;
std::memcpy(&raw.x, &storage, sizeof(raw.x));
return half(raw);
#endif
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint16_t& raw() {
return storage;
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint16_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((storage & 0x8000) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((storage >> 10) & 0x1f);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - 15;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(storage & 0x3ff);
}
};
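///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only, not part of the original header):
//
//   cutlass::half_t x(1.25f);                  // explicit FP32 -> FP16 conversion
//   float y = float(x) * 2.0f;                 // widening back to FP32
//   cutlass::half_t z(y);
//   uint16_t bits = z.raw();                   // raw IEEE 754 binary16 encoding (0x4100 == 2.5)
//
// On devices with __CUDA_ARCH__ >= 530 the arithmetic operators defined later in this file map
// to native half-precision instructions; elsewhere they fall back to float arithmetic.
//
///////////////////////////////////////////////////////////////////////////////////////////////////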
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool signbit(cutlass::half_t const& h) {
return ((h.raw() & 0x8000) != 0);
}
CUTLASS_HOST_DEVICE
cutlass::half_t abs(cutlass::half_t const& h) {
return cutlass::half_t::bitcast(h.raw() & 0x7fff);
}
CUTLASS_HOST_DEVICE
bool isnan(cutlass::half_t const& h) {
return (h.exponent_biased() == 0x1f) && h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isfinite(cutlass::half_t const& h) {
return (h.exponent_biased() != 0x1f);
}
CUTLASS_HOST_DEVICE
cutlass::half_t nanh(const char*) {
// NVIDIA canonical NaN
return cutlass::half_t::bitcast(0x7fff);
}
CUTLASS_HOST_DEVICE
bool isinf(cutlass::half_t const& h) {
return (h.exponent_biased() == 0x1f) && !h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isnormal(cutlass::half_t const& h) {
return h.exponent_biased() && h.exponent_biased() != 0x1f;
}
CUTLASS_HOST_DEVICE
int fpclassify(cutlass::half_t const& h) {
int exp = h.exponent_biased();
int mantissa = h.mantissa();
if (exp == 0x1f) {
if (mantissa) {
return FP_NAN;
}
else {
return FP_INFINITE;
}
}
else if (!exp) {
if (mantissa) {
return FP_SUBNORMAL;
}
else {
return FP_ZERO;
}
}
return FP_NORMAL;
}
CUTLASS_HOST_DEVICE
cutlass::half_t sqrt(cutlass::half_t const& h) {
#if defined(__CUDACC_RTC__)
return cutlass::half_t(sqrtf(float(h)));
#else
return cutlass::half_t(std::sqrt(float(h)));
#endif
}
CUTLASS_HOST_DEVICE
half_t copysign(half_t const& a, half_t const& b) {
uint16_t a_mag = (a.raw() & 0x7fff);
uint16_t b_sign = (b.raw() & 0x8000);
uint16_t result = (a_mag | b_sign);
return half_t::bitcast(result);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
#if !defined(__CUDACC_RTC__)
namespace std {
/// Numeric limits
template <>
struct numeric_limits<cutlass::half_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = true;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 10;
/// Least positive value
static cutlass::half_t min() { return cutlass::half_t::bitcast(0x0001); }
/// Minimum finite value
static cutlass::half_t lowest() { return cutlass::half_t::bitcast(0xfbff); }
/// Maximum finite value
static cutlass::half_t max() { return cutlass::half_t::bitcast(0x7bff); }
  /// Returns the machine epsilon (difference between 1 and the next representable value)
static cutlass::half_t epsilon() { return cutlass::half_t::bitcast(0x1800); }
/// Returns maximum rounding error
static cutlass::half_t round_error() { return cutlass::half_t(0.5f); }
/// Returns positive infinity value
static cutlass::half_t infinity() { return cutlass::half_t::bitcast(0x7c00); }
/// Returns quiet NaN value
static cutlass::half_t quiet_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns signaling NaN value
static cutlass::half_t signaling_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns smallest positive subnormal value
static cutlass::half_t denorm_min() { return cutlass::half_t::bitcast(0x0001); }
};
} // namespace std
#endif
namespace platform {
/// std::numeric_limits
template <class T>
struct numeric_limits;
/// Numeric limits
template <>
struct numeric_limits<cutlass::half_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
#if !defined(__CUDACC_RTC__)
static std::float_denorm_style const has_denorm = std::denorm_present;
#endif
static bool const has_denorm_loss = true;
#if !defined(__CUDACC_RTC__)
static std::float_round_style const round_style = std::round_to_nearest;
#endif
static bool const is_iec559 = true;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 10;
/// Least positive value
CUTLASS_HOST_DEVICE
static cutlass::half_t min() { return cutlass::half_t::bitcast(0x0001); }
/// Minimum finite value
CUTLASS_HOST_DEVICE
static cutlass::half_t lowest() { return cutlass::half_t::bitcast(0xfbff); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static cutlass::half_t max() { return cutlass::half_t::bitcast(0x7bff); }
  /// Returns the machine epsilon (difference between 1 and the next representable value)
CUTLASS_HOST_DEVICE
static cutlass::half_t epsilon() { return cutlass::half_t::bitcast(0x1800); }
/// Returns maximum rounding error
CUTLASS_HOST_DEVICE
static cutlass::half_t round_error() { return cutlass::half_t(0.5f); }
/// Returns positive infinity value
CUTLASS_HOST_DEVICE
static cutlass::half_t infinity() { return cutlass::half_t::bitcast(0x7c00); }
/// Returns quiet NaN value
CUTLASS_HOST_DEVICE
static cutlass::half_t quiet_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns signaling NaN value
CUTLASS_HOST_DEVICE
static cutlass::half_t signaling_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns smallest positive subnormal value
CUTLASS_HOST_DEVICE
static cutlass::half_t denorm_min() { return cutlass::half_t::bitcast(0x0001); }
};
} // namespace platform
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __heq(lhs.to_half(), rhs.to_half());
#else
return float(lhs) == float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator!=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hne(lhs.to_half(), rhs.to_half());
#else
return float(lhs) != float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator<(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hlt(lhs.to_half(), rhs.to_half());
#else
return float(lhs) < float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator<=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hle(lhs.to_half(), rhs.to_half());
#else
return float(lhs) <= float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator>(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hgt(lhs.to_half(), rhs.to_half());
#else
return float(lhs) > float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator>=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hge(lhs.to_half(), rhs.to_half());
#else
return float(lhs) >= float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
half_t operator+(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hadd(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) + float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator-(half_t const& lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hneg(lhs.to_half()));
#else
return half_t(-float(lhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator-(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hsub(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) - float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator*(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hmul(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) * float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator/(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hdiv(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) / float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t& operator+=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) + float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator-=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) - float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator*=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hmul(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) * float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator/=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hdiv(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) / float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator++(half_t & lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
++tmp;
lhs = half_t(tmp);
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator--(half_t & lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
--tmp;
lhs = half_t(tmp);
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t operator++(half_t & lhs, int) {
half_t ret(lhs);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
tmp++;
lhs = half_t(tmp);
#endif
return ret;
}
CUTLASS_HOST_DEVICE
half_t operator--(half_t & lhs, int) {
half_t ret(lhs);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
tmp--;
lhs = half_t(tmp);
#endif
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::half_t operator "" _hf(long double x) {
return cutlass::half_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::half_t operator "" _hf(unsigned long long int x) {
return cutlass::half_t(int(x));
}
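// Illustrative usage of the literals above (not part of the original header):
//   cutlass::half_t x = 3.5_hf;   // from a floating-point literal
//   cutlass::half_t y = 2_hf;     // from an integer literal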
///////////////////////////////////////////////////////////////////////////////////////////////////
|
cutlass/include/cutlass/half.h/0
|
{
"file_path": "cutlass/include/cutlass/half.h",
"repo_id": "cutlass",
"token_count": 9165
}
| 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a Shape template for matrix tiles
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Describes the size of a matrix tile
template <
int Row_, ///< rows of a matrix
int Column_ ///< columns of a matrix
>
struct MatrixShape {
static int const kRow = Row_; ///< rows of a matrix
static int const kColumn = Column_; ///< columns of a matrix
static int const kCount = Row_ * Column_; ///< total number of elements in a matrix
//
// Static member functions
//
CUTLASS_HOST_DEVICE
static Coord<2> toCoord() {
return make_Coord(kRow, kColumn);
}
};
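// Example (illustrative): MatrixShape<4, 8> describes a 4x8 tile, so kRow == 4,
// kColumn == 8, kCount == 32, and toCoord() returns make_Coord(4, 8).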
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
|
cutlass/include/cutlass/matrix_shape.h/0
|
{
"file_path": "cutlass/include/cutlass/matrix_shape.h",
"repo_id": "cutlass",
"token_count": 721
}
| 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over densely packed tensors in global memory
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_, ///< shape of CTA (concept: MatrixShape)
typename OutputOp_ , ///< output operator (concept: epilogue::thread operator)
typename ReductionOp_, ///< reduction operator (concept: ReductionOperator)
int PartitionsPerStage = 4 ///< number of partitions to issue
>
class ReduceSplitK {
public:
using Shape = Shape_;
using ReductionOp = ReductionOp_;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = OutputOp::kCount;
static int const kPartitionsPerStage = PartitionsPerStage;
using ElementWorkspace = typename ReductionOp::Element;
using ElementAccumulator = typename ReductionOp::ElementAccumulator;
using ElementOutput = typename OutputOp::ElementOutput;
using WorkspaceTensorRef = TensorRef<ElementWorkspace, layout::RowMajor>;
using OutputTensorRef = TensorRef<ElementOutput, layout::RowMajor>;
using StrideIndex = typename WorkspaceTensorRef::Layout::Stride::Index;
using FragmentWorkspace = AlignedArray<ElementWorkspace, kElementsPerAccess>;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentOutput = AlignedArray<ElementOutput, kElementsPerAccess>;
//
// Types
//
/// Params structure
struct Params {
MatrixCoord problem_size;
int partitions;
size_t partition_stride;
WorkspaceTensorRef workspace;
OutputTensorRef destination;
OutputTensorRef source;
typename OutputOp::Params output;
typename ReductionOp::Params reduction;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
MatrixCoord problem_size_,
int partitions_,
size_t partition_stride_,
WorkspaceTensorRef workspace_,
OutputTensorRef destination_,
OutputTensorRef source_,
typename OutputOp::Params output_ = typename OutputOp::Params(),
typename ReductionOp::Params reduction_ = typename ReductionOp::Params()
):
problem_size(problem_size_),
partitions(partitions_),
partition_stride(sizeof(FragmentWorkspace) * partition_stride_ / kElementsPerAccess),
workspace(workspace_),
destination(destination_),
source(source_),
output(output_),
reduction(reduction_) {
}
};
struct SharedStorage { };
public:
/// Computes the grid size given a chosen threadblock shape
CUTLASS_HOST_DEVICE
static dim3 grid_shape(
cutlass::MatrixCoord problem_size) {
return dim3(
(problem_size.row() + Shape::kRow - 1) / Shape::kRow,
(problem_size.column() + Shape::kColumn - 1) / Shape::kColumn);
}
/// Determines the threadblock shape
CUTLASS_HOST_DEVICE
static dim3 block_shape() {
return dim3(Shape::kColumn / kElementsPerAccess, Shape::kRow);
}
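// Illustrative example (hypothetical parameters): with Shape = MatrixShape<4, 256> and
// kElementsPerAccess = 8, block_shape() is dim3(32, 4) (128 threads), and for a
// 1024x512 problem, grid_shape() is dim3(256, 2).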
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &storage) {
// Determine CTA position
MatrixCoord thread_offset(
MatrixCoord::Index(int(blockIdx.x) * Shape::kRow + threadIdx.y),
MatrixCoord::Index(int(blockIdx.y) * Shape::kColumn + threadIdx.x * kElementsPerAccess)
);
// One guard conditional
if (!(thread_offset.row() < params.problem_size.row() &&
thread_offset.column() < params.problem_size.column())) {
return;
}
ReductionOp reduction_op(params.reduction);
FragmentAccumulator accumulator;
accumulator.clear();
//
// Load the first slice
//
char const *workspace_ptr =
reinterpret_cast<char const *>(
params.workspace.data() + params.workspace.offset(thread_offset));
FragmentWorkspace workspace_frag[kPartitionsPerStage];
//
// Construct the output operator
//
OutputOp output_op(params.output);
//
// Load and accumulate with a simple batched loading sequence.
//
CUTLASS_PRAGMA_NO_UNROLL
for (int k = 0; k < params.partitions; k += kPartitionsPerStage) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPartitionsPerStage; ++i) {
if (k + i < params.partitions) {
workspace_frag[i] = *reinterpret_cast<FragmentWorkspace const *>(workspace_ptr);
workspace_ptr += params.partition_stride;
}
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPartitionsPerStage; ++i) {
if (k + i < params.partitions) {
accumulator = reduction_op(accumulator, workspace_frag[i]);
}
}
}
//
// Conditionally load the source
//
FragmentOutput source_frag;
source_frag.clear();
FragmentOutput const *source_ptr = reinterpret_cast<FragmentOutput const *>(
params.source.data() + params.source.offset(thread_offset));
if (output_op.is_source_needed()) {
reinterpret_cast<FragmentOutput &>(source_frag) = *source_ptr;
}
//
// Compute the output
//
typename OutputOp::FragmentOutput output_frag = output_op(accumulator, source_frag);
//
// Store
//
FragmentOutput *dest_ptr = reinterpret_cast<FragmentOutput *>(
params.destination.data() + params.destination.offset(thread_offset));
*dest_ptr = reinterpret_cast<FragmentOutput const &>(output_frag);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
|
cutlass/include/cutlass/reduction/kernel/reduce_split_k.h/0
|
{
"file_path": "cutlass/include/cutlass/reduction/kernel/reduce_split_k.h",
"repo_id": "cutlass",
"token_count": 2623
}
| 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cstdint>
#include <string>
#define CUTLASS_MAJOR 3
#define CUTLASS_MINOR 5
#define CUTLASS_PATCH 0
#ifdef CUTLASS_VERSIONS_GENERATED
#include "cutlass/version_extended.h"
#else
#define CUTLASS_BUILD 0
#define CUTLASS_REVISION ""
#endif
#define CUTLASS_VERSION ((CUTLASS_MAJOR)*100 + (CUTLASS_MINOR)*10 + CUTLASS_PATCH)
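// For example, CUTLASS 3.5.0 yields (3 * 100) + (5 * 10) + 0 == 350.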
namespace cutlass {
inline constexpr uint32_t getVersion() {
return CUTLASS_VERSION;
}
inline constexpr uint32_t getVersionMajor() {
return CUTLASS_MAJOR;
}
inline constexpr uint32_t getVersionMinor() {
return CUTLASS_MINOR;
}
inline constexpr uint32_t getVersionPatch() {
return CUTLASS_PATCH;
}
inline constexpr uint32_t getVersionBuild() {
return CUTLASS_BUILD + 0;
}
inline std::string getVersionString() {
std::string version = "@CUTLASS_VERSION@";
if (getVersionBuild()) {
version += "." + std::to_string(getVersionBuild());
}
return version;
}
inline std::string getGitRevision() {
return "@CUTLASS_REVISION@";
}
} // namespace cutlass
|
cutlass/include/cutlass/version.h/0
|
{
"file_path": "cutlass/include/cutlass/version.h",
"repo_id": "cutlass",
"token_count": 888
}
| 45 |
[README](../../README.md#documentation) > **CUTLASS 3.0 Design and Hierarchy**
# CUTLASS 3.0 Design
CUTLASS 3.0 is a major enhancement over the abstractions of CUTLASS 2.x
and aims to make usage of all layers of the GEMM hierarchy easier and more composable
while still achieving peak performance on hardware.
## CUTLASS 3.0 design goals
CUTLASS 3.0 has the following design goals, in no particular order.
- Simplify expressing and manipulating data and thread layouts across
the GEMM hierarchy with CuTe layouts and layout algebra.
- Improve code readability and learning curve by
reducing the number of named types.
- Functional correctness by default,
actionable static asserts otherwise.
- Single, clear points of performance tuning and custom kernel extensions.
- Support for NVIDIA Hopper GPUs with great performance using
features such as Tensor Cores, tensor memory accelerator, and thread block clusters.
## A new Conceptual GEMM Hierarchy
CUTLASS 2.x decomposes the moving parts of a GEMM operation
across a hierarchy that closely mirrors the organization of GPU
architectures. This is discussed in detail in the
[CUTLASS 2.x GEMM API documentation](/media/docs/gemm_api.md).
This design, however, sometimes results in a coupling that is too tight
to extend to newer GPU features that might not fit into the same architectural
hierarchy. For instance, Hopper's warp-group wide instructions do not naturally
fit into any warp or thread layer GEMM concept in CUTLASS 2.x. Even for Volta tensor cores,
instructions that atomically exist at the quad-pair granularity are first tiled at
the warp level before use. This hints at the brittleness of the abstraction power.
CUTLASS 3.0 detaches its interface layers from the hardware,
centering them instead around the natural structure of GEMM algorithms
not tied to any particular GPU generation.
This makes CUTLASS's code more robust to GPU architecture evolution,
less prone to implementation detail leakage, and provides users
with a consistent interface to hardware acceleration regardless of
the architecture specific details.
The new conceptual GEMM hierarchy is discussed in detail in the dedicated
[CUTLASS 3.0 GEMM API documentation readme](/media/docs/gemm_api_3x.md),
along with code examples of the core concepts and types.
## Adoption of CuTe Layout and Tensors
CUTLASS 3.0 introduces a new core library, CuTe, to describe and manipulate tensors of threads and data.
CuTe is a collection of C++ CUDA template abstractions for defining and operating on hierarchically multidimensional layouts of threads and data. CuTe provides `Layout` and `Tensor` objects that compactly package the type, shape, memory space, and layout of data, while performing the complicated indexing for the user.
CUTLASS 3.0 adopts CuTe throughout the GEMM hierarchy in its templates, greatly simplifying the design,
improving code composability, and readability. More documentation specific to CuTe can be found in its [dedicated documentation directory](/media/docs/cute/00_quickstart.md).
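As a brief, illustrative sketch (not taken from CUTLASS itself), the following builds a static column-major `cute::Layout`, wraps a raw pointer in a `cute::Tensor`, and lets the layout perform the index arithmetic; see the CuTe documentation linked above for the authoritative API.
```c++
#include <cute/tensor.hpp>
int main() {
  using namespace cute;
  // An 8x4 column-major layout: shape (8, 4) with stride (1, 8)
  auto layout = make_layout(make_shape(Int<8>{}, Int<4>{}),
                            make_stride(Int<1>{}, Int<8>{}));
  // Backing storage for the 32 elements the layout addresses
  float data[32];
  for (int i = 0; i < 32; ++i) { data[i] = float(i); }
  // A tensor pairs the pointer with the layout and performs the indexing
  auto tensor = make_tensor(&data[0], layout);
  // Logical coordinate (3, 2) maps to linear offset 3 * 1 + 2 * 8 = 19
  return tensor(3, 2) == 19.0f ? 0 : 1;
}
```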

Programming massively parallel systems with various layers of logical thread and data hierarchies is not a trivial task.
- `cute::Layout`s always maintain logical consistency of their coordinates,
allowing us to check pre- and post-conditions at compile time for all static inner loops.
- Explicit thread to data mapping allows users and kernel authors to inspect and reason about operations
from a single point in the source code.
- Layouts provide a single point of performance tuning, as most optimizations can be done by careful
selection of thread and data layouts.
- Formalized algebra makes manipulation of and reasoning about thread->data mapping explicit in source code.
- Single vocabulary type (`cute::Layout`) subsumes every iterator and layout in CUTLASS 2.x. CUTLASS 2.x uses many bespoke thread maps, iterators, and data layouts. Iterators are fundamentally 1-D, whereas most layouts we encounter in the GPU hierarchy are fundamentally n-D.
## Reducing the number of named types and iterator concepts
CUTLASS 2.x design preferred introducing bespoke named types for each
architecture specific thread and data layout. For instance, `gemm::treadblock` namespace
contains implementation for `MmaMultistage`, `MmaPlanarComplexMultistage`, `MmaPipelined` etc.
despite them providing mainloops for GEMMs. To spell these types the same way in generic code,
CUTLASS 2.x provides aliases through its `default_x_configuration.h` files, however,
these aliases make the code much harder to read as the user has to perform type substitution
mentally in order to understand the codebase.
CUTLASS 3.0 greatly reduces the number of named types used throughout by
- Replacing all iterator concepts for all memory domains with `cute::Tensor`s
- Dispatching mainloop and epilogue implementations on tag-dispatch policies rather than naming new types
- Dispatching kernel layer schedules on tag-dispatch policies rather than naming new types
Reducing the number of named types has many benefits:
- It *makes writing generic code easier*, as the primary type names share the same lexical structure
without aliasing through configuration providers.
- It *flattens the learning curve of CUTLASS* by greatly reducing the mental context required
as the library only exposes a handful of named types.
- It *provides a clear, singular extension point* for users to plug in their customizations
through the dispatch policies.
## Correctness by default, Performance through clear, individual points of tuning
CUTLASS 2.x maintained its thread layouts as implicit indexing math implemented
as a part of 1D iterators. This meant that the thread to data layout mapping
was implicit in the imperative structure of the C++ code itself and did not have
a formal algebra we could use to manipulate these mappings. Each iterator
had to re-implement its indexing and mapping logic. This made it hard to learn
how this mapping was performed for existing iterators, and even harder to
implement custom layout functions for the core inner loops of a GEMM.
CUTLASS 3.0 replaces all iterator concepts from CUTLASS 2.x
with a single layout type for thread and data tensors.
CuTe's formalized layout algebra is then used at every layer of
the GEMM hierarchy to manipulate the mapping between the two.
CuTe layouts always maintain logical consistency, and for fully static layouts
(such as in the core unrolled inner loops), provide
compile time checks that break builds if this consistency is violated.
In this way, CuTe reifies the thread-to-data-layout mapping and
makes it easier to write code that is "correct by construction".
If the code compiles, it's probably correct.
|
cutlass/media/docs/cutlass_3x_design.md/0
|
{
"file_path": "cutlass/media/docs/cutlass_3x_design.md",
"repo_id": "cutlass",
"token_count": 1661
}
| 46 |

[README](../../README.md#documentation) > **CUTLASS Utilities**
Note: This document discusses utilities commonly used with code that targets CUTLASS 2.x.
Although CUTLASS 3.0's primary entry point APIs do not transact in these `cutlass::*` tensor types anymore,
users can still find them convenient for managing allocations with trivial affine layouts.
For more advanced host side tensor management, [`cute::Tensor`](/media/docs/cute/03_tensor.md)s
can be used on either host or device for any memory space and full expressive power of
[`cute::Layout`](/media/docs/cute/01_layout.md)s.
# CUTLASS Utilities
CUTLASS utilities are additional template classes that facilitate recurring tasks. These are
flexible implementations of needed functionality, but they are not expected to be efficient.
Applications should configure their builds to list `/tools/util/include` in their include
paths.
Source code is in [`/tools/util/include/cutlass/util/`](/tools/util/include/cutlass/util).
## Tensor Allocation and I/O
To allocate a tensor with storage in both host and device memory, use `HostTensor` in
[`cutlass/util/host_tensor.h`](/tools/util/include/cutlass/util/host_tensor.h)
```c++
template <typename Element, typename Layout>
class HostTensor;
```
This class is compatible with all CUTLASS numeric data types and layouts.
**Example:** column-major matrix storage of single-precision elements.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 32;
int columns = 16;
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({rows, columns});
return 0;
}
```
Internal host-side storage may be accessed via the following methods.
```c++
float *host_ptr = tensor.host_data();
cutlass::TensorRef<float, cutlass::layout::ColumnMajor> host_ref = tensor.host_ref();
cutlass::TensorView<float, cutlass::layout::ColumnMajor> host_view = tensor.host_view();
```
Device memory may be accessed similarly.
```c++
float *device_ptr = tensor.device_data();
cutlass::TensorRef<float, cutlass::layout::ColumnMajor> device_ref = tensor.device_ref();
cutlass::TensorView<float, cutlass::layout::ColumnMajor> device_view = tensor.device_view();
```
Printing to human-readable CSV output is accomplished with `std::ostream::operator<<()` defined in
[`cutlass/util/tensor_view_io.h`](/tools/util/include/cutlass/util/tensor_view_io.h).
Note, this assumes all views refer to host memory.
```c++
#include <cutlass/util/tensor_view_io.h>
int main() {
// Obtain a TensorView into host memory
cutlass::TensorView<float, cutlass::layout::ColumnMajor> view = tensor.host_view();
// Print to std::cout
std::cout << view << std::endl;
return 0;
}
```
Host and device memory must be explicitly synchronized by the application.
```c++
float idx = 0;
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < columns; ++j) {
// Write the element at location {i, j} in host memory
tensor.host_ref().at({i, j}) = idx;
idx += 0.5f;
}
}
// Copy host memory to device memory
tensor.sync_device();
// Obtain a device pointer usable in CUDA kernels
float *device_ptr = tensor.device_data();
```
`HostTensor<>` is usable by all CUTLASS layouts including interleaved layouts.
```c++
int rows = 4;
int columns = 3;
cutlass::HostTensor<float, cutlass::layout::ColumnMajorInterleaved<4>> tensor({rows, columns});
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < columns; ++j) {
// Write the element at location {i, j} in host memory
tensor.host_ref().at({i, j}) = float(i) * 1.5f - float(j) * 2.25f;
}
}
std::cout << tensor.host_view() << std::endl;
```
## Device Allocations
To strictly allocate memory on the device using the smart pointer pattern to manage allocation and deallocation,
use `cutlass::DeviceAllocation<>`.
**Example:** allocating an array in device memory.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor_view.h>
#include <cutlass/util/device_memory.h>
__global__ void kernel(float *device_ptr) {
}
int main() {
size_t N = 1024;
cutlass::DeviceAllocation<float> device_alloc(N);
// Call a CUDA kernel passing device memory as a pointer argument
dim3 grid(1, 1, 1);
dim3 block(128, 1, 1);
kernel<<< grid, block >>>(device_alloc.get());
if (cudaGetLastError() != cudaSuccess) {
return -1;
}
// Device memory is automatically freed when device_alloc goes out of scope
return 0;
}
```
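Host data can be staged into a `DeviceAllocation` as well. A short sketch, assuming the `copy_from_host` member and reusing `N` and `device_alloc` from the example above (requires `<vector>`):
```c++
std::vector<float> host_data(N, 1.0f);
// Copy N elements from host memory into the device allocation
device_alloc.copy_from_host(host_data.data(), N);
```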
## Tensor Initialization
CUTLASS defines several utility functions to initialize tensors to uniform, procedural,
or randomly generated elements. These have implementations using strictly host code and
implementations using strictly CUDA device code.
`TensorFill()` for uniform elements throughout a tensor.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/reference/device/tensor_fill.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 128;
int columns = 64;
float x = 3.14159f;
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({rows, columns});
// Initialize in host memory
cutlass::reference::host::TensorFill(tensor.host_view(), x);
// Initialize in device memory
cutlass::reference::device::TensorFill(tensor.device_view(), x);
return 0;
}
```
`TensorFillRandomUniform()` for initializing elements to a random uniform distribution.
The device-side implementation uses CURAND to generate random numbers.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/reference/device/tensor_fill.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 128;
int columns = 64;
double maximum = 4;
double minimum = -4;
uint64_t seed = 0x2019;
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({rows, columns});
// Initialize in host memory
cutlass::reference::host::TensorFillRandomUniform(
tensor.host_view(),
seed,
maximum,
minimum);
// Initialize in device memory
cutlass::reference::device::TensorFillRandomUniform(
tensor.device_view(),
seed,
maximum,
minimum);
return 0;
}
```
`TensorFillRandomGaussian()` for initializing elements to a random gaussian distribution.
The device-side implementation uses CURAND to generate random numbers.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/reference/device/tensor_fill.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 128;
int columns = 64;
double mean = 0.5;
double stddev = 2.0;
uint64_t seed = 0x2019;
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({rows, columns});
// Initialize in host memory
cutlass::reference::host::TensorFillRandomGaussian(
tensor.host_view(),
seed,
mean,
stddev);
// Initialize in device memory
cutlass::reference::device::TensorFillRandomGaussian(
tensor.device_view(),
seed,
mean,
stddev);
return 0;
}
```
Each of these functions accepts an additional argument specifying how many bits of
the mantissa to the right of the binary point may be non-zero. This simplifies functional comparisons when
exact random distributions are not necessary, since elements may be restricted to
integers or values with exact fixed-point representations.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/reference/device/tensor_fill.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 128;
int columns = 64;
double mean = 0.5;
double stddev = 2.0;
uint64_t seed = 0x2019;
int bits_right_of_binary_decimal = 2;
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({rows, columns});
// Initialize in host memory
cutlass::reference::host::TensorFillRandomGaussian(
tensor.host_view(),
seed,
mean,
stddev,
bits_right_of_binary_decimal);
// Initialize in device memory
cutlass::reference::device::TensorFillRandomGaussian(
tensor.device_view(),
seed,
mean,
stddev,
bits_right_of_binary_decimal);
return 0;
}
```
These utilities may be used for all data types.
**Example:** random half-precision tensor with Gaussian distribution.
```c++
#include <cutlass/numeric_types.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/reference/device/tensor_fill.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 128;
int columns = 64;
double mean = 0.5;
double stddev = 2.0;
uint64_t seed = 0x2019;
// Allocate a column-major tensor with half-precision elements
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> tensor({rows, columns});
// Initialize in host memory
cutlass::reference::host::TensorFillRandomGaussian(
tensor.host_view(),
seed,
mean,
stddev);
// Initialize in device memory
cutlass::reference::device::TensorFillRandomGaussian(
tensor.device_view(),
seed,
mean,
stddev);
return 0;
}
```
## Reference Implementations
CUTLASS defines reference implementations usable with all data types and layouts. These are
used throughout the unit tests.
**Example:** Reference GEMM implementation with mixed precision internal computation.
```c++
#include <cutlass/numeric_types.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/util/host_tensor.h>
#include <cutlass/util/reference/host/gemm.h>
int main() {
int M = 64;
int N = 32;
int K = 16;
float alpha = 1.5f;
float beta = -1.25f;
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A({M, K});
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B({K, N});
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C({M, N});
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> D({M, N});
cutlass::reference::host::Gemm<
cutlass::half_t, cutlass::layout::ColumnMajor, // ElementA and LayoutA
cutlass::half_t, cutlass::layout::ColumnMajor, // ElementB and LayoutB
cutlass::half_t, cutlass::layout::ColumnMajor, // ElementC and LayoutC
float, // scalar type (alpha and beta)
float> gemm_op; // internal accumulation type
gemm_op(
{M, N, K}, // problem size
alpha, // alpha scalar
A.host_view(), // TensorView to host memory
B.host_view(), // TensorView to host memory
beta, // beta scalar
C.host_view(), // TensorView to host memory
D.host_view()); // TensorView to host memory
return 0;
}
```
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
|
cutlass/media/docs/utilities.md/0
|
{
"file_path": "cutlass/media/docs/utilities.md",
"repo_id": "cutlass",
"token_count": 4175
}
| 47 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import subprocess
from cutlass_library import DataTypeTag
import pydot
from cutlass.backend.evt.ir.dag_ir import DAGIR
_COLOR_MAP = {
"load": '"AliceBlue"',
"compute": "LemonChiffon1",
"accumulator": "LightGrey",
"store": "PowderBlue",
"layout": "lightseagreen",
"dag": "darkorange"
}
class EVTGraphDrawer:
"""
Visualize a EVT DAGIR with graphviz
"""
def __init__(
self,
graph: DAGIR,
name: str
):
self._name = name
self._dot_graphs = {}
self._dot_graphs[name] = self._to_dot(graph, name)
def _get_node_style(self, node):
template = {
"shape": "record",
"fillcolor": "#CAFFE3",
"style": '"filled,rounded"',
"fontcolor": "#000000",
}
if node.op in _COLOR_MAP:
template["fillcolor"] = _COLOR_MAP[node.op]
else:
raise NotImplementedError("unknown node op")
if node.disabled:
template["fontcolor"] = "grey"
template["fillcolor"] = "white"
return template
def _get_node_label(self, node):
label = "{" + f"name={node.name}|op={node.op}"
if node.op == "layout":
label += f"|fn={node.fn.__name__}"
for key in node.kwargs:
label += f"|{key}={node.kwargs[key]}"
if node.underlying_impl is not None:
label += f"|impl={type(node.underlying_impl).__name__}"
if node.op == "load":
label += f"|element_output={DataTypeTag[node.underlying_impl.element]}"
elif node.op == "compute":
label += f"|element_compute={DataTypeTag[node.underlying_impl.element_compute]}|element_output={DataTypeTag[node.underlying_impl.element_output]}"
elif node.op == "store":
label += f"|element_store={DataTypeTag[node.underlying_impl.element]}|element_output={DataTypeTag[node.underlying_impl.element_output]}"
elif node.op == "dag":
label += f"|element_output={DataTypeTag[node.underlying_impl.element_output]}"
if node.tensor is not None:
shape = node.tensor.shape
stride = node.tensor.stride
label += f"|shape={shape}|stride={stride}"
if hasattr(node, "store_tensor"):
if node.store_tensor is not None:
store_shape = node.store_tensor.shape
store_stride = node.store_tensor.stride
label += f"|store_shape={store_shape}|stride_stride={store_stride}"
label += "}"
return label
def _to_dot(
self,
graph: DAGIR,
name: str
):
dot_graph = pydot.Dot(name, rankdir="TB")
for node in graph.nodes_meta:
style = self._get_node_style(node)
label = self._get_node_label(node)
dot_node = pydot.Node(
node.name, label=label, **style
)
dot_graph.add_node(dot_node)
if node.op == "dag":
dot_subgraph = self._to_dot(node.subgraph, name=node.name)
self._dot_graphs[node.name] = dot_subgraph
# Add edges
for src, dst in graph.edges:
weight = graph.get_edge_weight(src, dst)
dot_graph.add_edge(pydot.Edge(src, dst, label=weight))
return dot_graph
def get_dot_graph(self) -> list:
return [(key, self.get_dot_graph_by_name(key)) for key in self._dot_graphs.keys()]
def get_dot_graph_by_name(self, name) -> pydot.Dot:
return self._dot_graphs[name]
def get_main_dot_graph(self) -> pydot.Dot:
return self._dot_graphs[self._name]
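# Illustrative usage (names are hypothetical; rendering requires the graphviz binaries):
#   drawer = EVTGraphDrawer(dag_ir, "evt")
#   for name, dot in drawer.get_dot_graph():
#       dot.write_png(f"{name}.png")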
|
cutlass/python/cutlass/backend/evt/passes/graph_drawer.py/0
|
{
"file_path": "cutlass/python/cutlass/backend/evt/passes/graph_drawer.py",
"repo_id": "cutlass",
"token_count": 2285
}
| 48 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
from cuda import __version__, cuda
from cutlass.backend.utils.device import device_cc
_version_splits = [int(x) for x in __version__.split("rc")[0].split(".")]
_supports_cluster_launch = None
def supports_cluster_launch():
global _supports_cluster_launch
if _supports_cluster_launch is None:
major, minor = _version_splits[0], _version_splits[1]
_supports_cluster_launch = device_cc() >= 90 and (major > 11 or (major == 11 and minor >= 8))
return _supports_cluster_launch
class LaunchConfiguration:
def __init__(self, grid=[1, 1, 1], block=[1, 1, 1], smem=0):
self.grid = grid
self.block = block
self.shared_memory_capacity = smem
class ExecutableOperation:
def __init__(self, operation):
self.operation = operation
self.module = None
self.kernel = None
def name(self):
return self.operation.procedural_name()
def emit(self):
return ""
def can_implement(self, configuration, arguments):
raise NotImplementedError()
def get_host_workspace_size(self, arguments):
raise NotImplementedError()
def get_device_workspace_size(self, arguments):
raise NotImplementedError()
def plan(self, arguments):
raise NotImplementedError()
def initialize(self, host_workspace, device_workspace, launch_config, arguments, stream=cuda.CUstream(0)):
raise NotImplementedError()
def run_with_clusters(self, launch_config, kernel_params, stream=cuda.CUstream(0)):
if hasattr(self.operation, "tile_description") and hasattr(self.operation.tile_description, "cluster_shape"):
attr = cuda.CUlaunchAttribute()
attr.value.clusterDim.x, attr.value.clusterDim.y, attr.value.clusterDim.z = self.operation.tile_description.cluster_shape
attr.id = cuda.CUstreamAttrID.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION
attrs = [attr]
# Allow for non-portable cluster sizes
err, = cuda.cuFuncSetAttribute(
self.kernel, cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED, 1)
if err != cuda.CUresult.CUDA_SUCCESS:
return err
else:
attrs = []
config = cuda.CUlaunchConfig()
config.gridDimX, config.gridDimY, config.gridDimZ = launch_config.grid
config.blockDimX, config.blockDimY, config.blockDimZ = launch_config.block
config.sharedMemBytes = launch_config.shared_memory_capacity
config.hStream = stream
config.attrs = attrs
config.numAttrs = len(attrs)
err, = cuda.cuLaunchKernelEx(
config, f=self.kernel, kernelParams=kernel_params, extra=0)
return err
def run_without_clusters(self, launch_config, kernel_params, stream=cuda.CUstream(0)):
err, = cuda.cuLaunchKernel(
self.kernel,
launch_config.grid[0], launch_config.grid[1], launch_config.grid[2],
launch_config.block[0], launch_config.block[1], launch_config.block[2],
launch_config.shared_memory_capacity,
stream,
kernel_params,
0)
return err
def run(self, host_workspace, device_workspace, launch_config, stream=cuda.CUstream(0)):
cArg = (ctypes.c_char * len(host_workspace)).from_buffer(host_workspace)
packed = (ctypes.c_void_p * 1)()
packed[0] = ctypes.addressof(cArg)
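# kernelParams for the CUDA driver API is an array of pointers to each kernel argument;
# the only argument here is the packed parameter struct held in host_workspace.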
if supports_cluster_launch():
return self.run_with_clusters(launch_config, packed, stream)
else:
return self.run_without_clusters(launch_config, packed, stream)
|
cutlass/python/cutlass/backend/operation.py/0
|
{
"file_path": "cutlass/python/cutlass/backend/operation.py",
"repo_id": "cutlass",
"token_count": 2048
}
| 49 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d)
"""
from bisect import bisect_left
from cutlass_library import (
DataType,
DataTypeSize,
MathOperation,
OperationKind,
SharedMemPerCC
)
import cutlass
from cutlass import get_option_registry
from cutlass.backend.evt import EpilogueFunctorVisitor
from cutlass.backend.utils.device import device_cc
from cutlass.epilogue import get_activations, get_activation_epilogue, identity
from cutlass.library_defaults import KernelsForDataType, _generator_ccs
from cutlass.swizzle import get_swizzling_functors
from cutlass.utils import datatypes, check
class OperationBase:
"""
Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d)
"""
def __init__(self, cc: int = None, kernel_cc: int = None, operation_kind = OperationKind.Gemm):
"""
:param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90
:type cc: int
:param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80
:type kernel_cc: int
:param operation_kind: class of operation that will be performed (e.g., GEMM, Conv)
:type operation_kind: cutlass_library.OperationKind
"""
self.operation_kind = operation_kind
self.cc = cc if cc is not None else device_cc()
self.specified_kernel_cc = kernel_cc is not None
self.current_cc = kernel_cc if kernel_cc is not None else self._find_closest_cc(self.cc)
self.tile_description = None
self._math_operation = None
self.options = get_option_registry().options_for_cc(self.current_cc, operation_kind)
if self.options is None:
raise Exception(f"Invalid or unsupported compute capability: {self.current_cc}")
# Default activation function: identity
self._activation = identity
def _find_closest_cc(self, cc: int) -> int:
"""
Returns the closest CC in _generator_ccs less than or equal to `cc`
:param cc: compute capability to query
:type cc: int
:returns: closest CC in _generator_ccs less than or equal to `cc`
:rtype: int
"""
if cc in _generator_ccs:
return cc
# Find closest CC lower than this CC
idx = bisect_left(_generator_ccs, cc)
if idx == 0:
raise Exception(f'No valid CC to fall back to for {cc}')
return _generator_ccs[idx-1]
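# Example (illustrative): if kernels are generated for CCs such as 70, 75, 80, and 90,
# a device with CC 86 falls back to 80.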
def activations(self) -> list:
"""
Returns possible activation functions that can be used
:return: list of activation functions that can be used
:rtype: list
"""
return get_activations()
def swizzling_functors(self) -> list:
"""
Returns possible swizzling functions that can be used
:return: list of swizzling functions that can be used
:rtype: list
"""
return get_swizzling_functors()
def _reset_options(self, cc: int):
"""
Resets the kernel options based on cc
:param cc: compute capability to reset to
:type cc: int
"""
if cc != self.current_cc:
if cc not in _generator_ccs:
raise Exception(f'Invalid CC for CUTLASS kernels: {cc}.')
self.current_cc = cc
self.options = get_option_registry().options_for_cc(self.current_cc, self.operation_kind)
def _verify_scalar(self, scalar, ref_scalar, ref_dtype, name):
"""
Verifies the following properties:
1) Either ``scalar`` or ``ref_scalar`` must be set (i.e., not ``None``)
2) If ``scalar`` is not ``None``, its datatype must match the current version
set by the plan (i.e., that in ``ref_dtype``)
If either of these properties does not hold, an exception is raised. If these properties hold and
``scalar`` is not ``None``, ``scalar`` is returned. Otherwise, ``ref_scalar`` is returned.
:param scalar: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type scalar: numpy/cupy/torch scalar
:param ref_scalar: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in
:type ref_scalar: numpy/cupy/torch scalar
:param ref_dtype: data type for the scalar that this object was initialized to
:param name: identifier of the scalar to verify. Used in raising exceptions
:type name: str
:return: valid scalar to use
:rtype: numpy/cupy/torch scalar
"""
if scalar is None:
if ref_scalar is None:
raise Exception(f"Scalar {name} must be set.")
return ref_scalar
if hasattr(scalar, "dtype"):
dtype = datatypes.library_type(scalar.dtype)
if dtype != ref_dtype:
raise Exception(
f"Tensor {name} with type {dtype} does not match expected type {ref_dtype}."
)
return scalar
def _verify_tensor(self, tensor, ref_tensor, ref_dtype, ref_layout, name):
"""
Verifies the following properties:
If ref_dtype is not void:
1) Either ``tensor`` or ``ref_tensor`` must be set (i.e., not ``None``)
2) If ``tensor`` is not ``None``, its datatype and layout must match the current versions
set by the plan (i.e., those in ``ref_dtype`` and ``ref_layout``)
If ref_dtype is void:
Neither ``tensor`` nor ``ref_tensor`` are set
If either of these properties does not hold, an exception is raised. If these properties hold and
``tensor`` is not ``None``, ``tensor`` is returned. Otherwise, ``ref_tensor`` is returned.
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
:param ref_tensor: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in
:type ref_tensor: numpy/cupy/torch array/tensor object
:param ref_dtype: data type for the tensor that this object was initialized to
:param ref_layout: layout for the tensor that this object was initialized to
:param name: identifier of the tensor to verify. Used in raising exceptions
:type name: str
:return: valid tensor object to use
:rtype: numpy/cupy/torch array/tensor object
"""
if ref_dtype == DataType.void:
if tensor is not None or ref_tensor is not None:
raise Exception("Operands with element DataType.void must not be provided a tensor")
return None
if tensor is None:
if ref_tensor is None:
raise Exception(f"Tensor {name} must be set.")
return ref_tensor
self._verify_type_and_layout(tensor, ref_dtype, ref_layout, name)
return tensor
@property
def opclass(self) -> cutlass.OpcodeClass:
"""
Returns the opcode class currently in use
:return: opcode class currently in use
:rtype: cutlass.OpcodeClass
"""
return self.op_class
@opclass.setter
def opclass(self, oc: cutlass.OpcodeClass):
if isinstance(oc, str):
oc = datatypes.getattr_enum(cutlass.OpcodeClass, oc)
if oc in self.possible_op_classes:
self.op_class = oc
else:
raise Exception(
f'Unsupported operation class {oc} for CC {self.cc} and data type combination '
f'({self._element_a}, {self._element_b}, {self._element_accumulator}) and '
f'layout combination ({self._layout_a}, {self._layout_b}).')
# Changing the op class also changes the possible operations available. Reset these.
self.possible_operations = self.options.operations(
self.op_class, self._element_a, self._element_b,
self._element_accumulator, self._layout_a, self._layout_b, self._math_operation)
# Changing the op class changes the elements per access in the epilogue. Reset this.
if self.epilogue_functor is not None:
self.epilogue_functor = self._reset_epilogue_functor_alignment(self._elements_per_access(), self.epilogue_functor)
@property
def math_operation(self) -> cutlass.MathOperation:
"""
Returns the math operation currently in use
:return: math operation currently in use
:rtype: cutlass.MathOperation
"""
return self._math_operation
@math_operation.setter
def math_operation(self, mo: cutlass.MathOperation):
if isinstance(mo, str):
mo = datatypes.getattr_enum(cutlass.MathOperation, mo)
if not self.specified_kernel_cc:
if self.current_cc == 90:
# CUTLASS 3.0 kernels do not use different math operations. If one is specified, we
# revert to using a CUTLASS 2.x kernel by using SM80-tagged kernels.
cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.")
self._reset_options(80)
self._reset_operations(reset_epilogue=False)
elif self.current_cc == 90:
raise Exception("CUTLASS 3.0 kernels do not use different math operations. "
"To use 2.x kernels with a specific math operation, do not set the `kernel_cc`"
"parameter when constructing the plan.")
self._math_operation = mo
self._reset_operations()
def _elements_per_access(self):
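# Epilogue vector width (illustrative): SIMT kernels use scalar access; otherwise a
# 128-bit access is assumed, e.g. 128 // 16 == 8 elements when C is a 16-bit type.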
if self.op_class == cutlass.OpcodeClass.Simt:
return 1
elif self._element_c != DataType.void:
return 128 // DataTypeSize[self._element_c]
else:
return 128 // max(self.possible_operations.alignments("C"))
def _create_epilogue_functor_activation(self, activation):
"""
Returns the epilogue functor with given activation function
"""
if self.epilogue_functor is None:
elements_per_access = self._elements_per_access()
else:
elements_per_access = self.epilogue_functor.epilogue_vector_length
if not self.specified_kernel_cc:
if self.current_cc == 90 and activation != identity:
# CUTLASS 3.0 kernels in Python currently only support identity activation. If one requests a non-identity activation,
# revert to using a CUTLASS 2.x kernel by using SM80-tagged kernels.
cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.")
if self._element_c != self._element_d:
raise Exception("CUTLASS 2.x kernels require element C to be the same as element D")
self._reset_options(80)
self._reset_operations(reset_epilogue=False)
elif (self.cc == 90 and self.current_cc != 90 and activation == identity and self._math_operation is None):
# SM80 fallback kernels are currently used. Since an identity activation is requested,
# we can switch back to using SM90 kernels.
self._reset_options(90)
self._reset_operations(reset_epilogue=False)
else:
if self.current_cc == 90 and activation != identity:
raise Exception("Epilogues with elementwise fusion are not currently supported "
"in the Python interface for 3.x kernels. To use 2.x kernels "
"with fused elementwise epilogues, do not set the `kernel_cc` "
"parameter when constructing the plan.")
return get_activation_epilogue(
activation,
self._element_d,
elements_per_access,
self._element_accumulator,
self._element_accumulator,
)
def _reset_epilogue_functor_activation(self, activation):
"""
Set the epilogue functor based on the provided activation function
"""
self.epilogue_functor = self._create_epilogue_functor_activation(activation)
def _reset_epilogue_functor_alignment(self, alignment, epilogue_functor):
"""
Reset the alignment of the current epilogue functor based on alignment C
"""
if isinstance(epilogue_functor, EpilogueFunctorVisitor):
return epilogue_functor
if epilogue_functor is None or not hasattr(epilogue_functor, 'activation_functor'):
# Identity epilogue does not have 'activation_functor'
activation = identity
else:
activation = epilogue_functor.activation_functor
epilogue_functor = get_activation_epilogue(
activation,
self._element_d,
alignment,
self._element_accumulator,
self._element_accumulator,
)
return epilogue_functor
@property
def activation(self):
"""
Returns the type of the current activation function used
"""
if hasattr(self.epilogue_functor, "activation_functor"):
return self.epilogue_functor.activation_functor
else:
return identity
@activation.setter
def activation(self, act):
"""
Sets the type of the activation function to use
Activation can come with a set of arguments
:param act: type of activation function to use
:type act: str or tuple. e.g. "relu", ("leaky_relu", 0.01)
"""
if isinstance(act, tuple):
if isinstance(act[0], str):
act_fn = getattr(cutlass.backend.epilogue, act[0])
else:
act_fn = act[0]
self._reset_epilogue_functor_activation(act_fn)
self._activation_args = act[1]
self._activation = act[0]
else:
if isinstance(act, str):
act = getattr(cutlass.backend.epilogue, act)
self._reset_epilogue_functor_activation(act)
self._activation = act
@property
def epilogue_visitor(self):
"""
Return the epilogue functor
"""
return self.epilogue_functor
@epilogue_visitor.setter
def epilogue_visitor(self, visitor):
"""
Create the epilogue visitor
"""
self.epilogue_functor = EpilogueFunctorVisitor(self.cc, visitor)
# The epilogue_functor may consume too much shared memory
# Reset the possible operations
if self.cc != 90:
# The shared memory is only a concern for sm90 epilogue
# In sm80, the epilogue and mainloop share the shared memory
return
datatype_comb = self.possible_operations.datatype_comb
layout_comb = self.possible_operations.layout_comb
new_possible_operations = KernelsForDataType(datatype_comb, layout_comb)
for operation in self.possible_operations.all_operations:
td = datatypes.td_from_profiler_op(operation)
# Filter invalid epilogue schedules
if td.epilogue_schedule not in [
cutlass.EpilogueScheduleType.TmaWarpSpecialized,
cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative]:
continue
epilogue_smem_bytes = self.epilogue_functor.get_smem_size(td)
# Verify the maximum number of mainloop stages
mainloop_smem_per_stage = check.calculate_smem_usage_per_stage(td, OperationKind.Gemm)
smem_capacity_bytes = SharedMemPerCC[self.cc] << 10
mainloop_stages = (smem_capacity_bytes - epilogue_smem_bytes) // mainloop_smem_per_stage
if mainloop_stages < 2:
                # Mainloop stages must be >= 2
continue
new_possible_operations.add(operation)
if len(new_possible_operations.all_operations) == 0:
raise RuntimeError(
"The epilogue consumes too much shared memory. "
"No valid tile description is found in the generator.")
self.possible_operations = new_possible_operations
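    # Illustrative stage-count check (all byte counts below are hypothetical, not measured):
    #   smem_capacity_bytes     = 227 << 10   # assumed SM90 capacity in KB, shifted to bytes
    #   epilogue_smem_bytes     = 32 << 10    # hypothetical epilogue shared-memory usage
    #   mainloop_smem_per_stage = 64 << 10    # hypothetical per-stage mainloop usage
    #   (smem_capacity_bytes - epilogue_smem_bytes) // mainloop_smem_per_stage == 3 >= 2,
    #   so an operation with this footprint would be kept.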
def run_setup(self):
"""
        Steps that must be taken before calling `plan.run()`
"""
        # Initialize the memory pool, if not already done
cutlass.get_memory_pool()
|
cutlass/python/cutlass/op/op.py/0
|
{
"file_path": "cutlass/python/cutlass/op/op.py",
"repo_id": "cutlass",
"token_count": 7578
}
| 50 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for emitting RankK kernels
"""
import enum
import functools
import operator
import os.path
import shutil
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
except ImportError:
from library import *
###################################################################################################
#
# Data structure modeling a Rank K update operation
#
###################################################################################################
#
class RankKOperation:
#
def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.RankK
self.arch = arch
self.tile_description = tile_description
self.rank_k_kind = rank_k_kind
self.A = A
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
    return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_mixed_input(self):
return False
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'syrk' if self.blas_mode == BlasMode.symmetric else 'herk'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.C.fill_mode])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.A.alignment, self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
    ''' The configuration name is simply the procedural name of this operation. '''
return self.procedural_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitRankKUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.rank_k_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::RankK<
${element_a}, ${layout_a},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${split_k_serial},
${math_operation}
>;
"""
self.rank_k_complex_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::RankK<
${element_a}, ${layout_a},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${split_k_serial},
${math_operation},
${transform_a},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'fill_mode': FillModeTag[operation.C.fill_mode],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'blas_mode': BlasModeTag[operation.blas_mode]
}
rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template
return SubstituteTemplate(rank_k_template, values)
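  # Illustrative substitution (assumed operand description, not a real generated instance):
  #   with C.alignment == 8 and C.element == f16 (16 bits per element),
  #   epilogue_vector_length = min(8 * 16, 128) / 16 = 8,
  #   so the emitted epilogue functor is vectorized over 128-bit accesses.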
###################################################################################################
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitRankKConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
RankKKind.Universal: EmitRankKUniversalInstance,
}
self.rank_k_kind_wrappers = {
RankKKind.Universal: 'RankKOperation',
}
self.instance_template = {
RankKKind.Universal: """
${compile_guard_start}
manifest.append(new ${rank_k_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
    }
    # Guard wrapped around WMMA kernels; `emit` below substitutes it as `compile_guard_start`.
    # The string follows the convention used by the other CUTLASS library emitters.
    self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)"
self.header_template = """
/*
Generated by rank_k_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "rank_k_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.rank_k_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
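  # Usage sketch (hypothetical path, configuration name, and operation list; in practice the
  # CUTLASS generator drives this class):
  #   with EmitRankKConfigurationLibrary("generated/rank_k", "cutlass_rank_k_example") as emitter:
  #     for op in operations:   # `operations`: a list of RankKOperation instances
  #       emitter.emit(op)
  #   # On exit, the .cu file holds the instance definitions plus an initialize_<name>() function.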
###################################################################################################
|
cutlass/python/cutlass_library/rank_k_operation.py/0
|
{
"file_path": "cutlass/python/cutlass_library/rank_k_operation.py",
"repo_id": "cutlass",
"token_count": 5264
}
| 51 |
function escapeRegExp(string) {
return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
}
/**
* Removes excluded text from a Node.
*
* @param {Node} target Node to filter.
* @param {string} exclude CSS selector of nodes to exclude.
 * @returns {DOMString} Text from `target` with the excluded text removed.
*/
export function filterText(target, exclude) {
const clone = target.cloneNode(true); // clone as to not modify the live DOM
if (exclude) {
// remove excluded nodes
clone.querySelectorAll(exclude).forEach(node => node.remove());
}
return clone.innerText;
}
// Callback invoked when a copy button is clicked. It receives the text content of the clicked
// node and strips out the pieces (prompts, continuations) that shouldn't appear in the copied output.
export function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") {
var regexp;
var match;
// Do we check for line continuation characters and "HERE-documents"?
var useLineCont = !!lineContinuationChar
var useHereDoc = !!hereDocDelim
// create regexp to capture prompt and remaining line
if (isRegexp) {
regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)')
} else {
regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)')
}
const outputLines = [];
var promptFound = false;
var gotLineCont = false;
var gotHereDoc = false;
const lineGotPrompt = [];
for (const line of textContent.split('\n')) {
match = line.match(regexp)
if (match || gotLineCont || gotHereDoc) {
promptFound = regexp.test(line)
lineGotPrompt.push(promptFound)
if (removePrompts && promptFound) {
outputLines.push(match[2])
} else {
outputLines.push(line)
}
      gotLineCont = line.endsWith(lineContinuationChar) && useLineCont
      if (line.includes(hereDocDelim) && useHereDoc)
        gotHereDoc = !gotHereDoc
} else if (!onlyCopyPromptLines) {
outputLines.push(line)
} else if (copyEmptyLines && line.trim() === '') {
outputLines.push(line)
}
}
// If no lines with the prompt were found then just use original lines
if (lineGotPrompt.some(v => v === true)) {
textContent = outputLines.join('\n');
}
// Remove a trailing newline to avoid auto-running when pasting
if (textContent.endsWith("\n")) {
textContent = textContent.slice(0, -1)
}
return textContent
}
|
cutlass/python/docs/_static/copybutton_funcs.js/0
|
{
"file_path": "cutlass/python/docs/_static/copybutton_funcs.js",
"repo_id": "cutlass",
"token_count": 1049
}
| 52 |
{
"path": "./../../../../examples/python/02_pytorch_extension_grouped_gemm.ipynb"
}
|
cutlass/python/docs_src/source/externals/02_pytorch_extension_grouped_gemm.nblink/0
|
{
"file_path": "cutlass/python/docs_src/source/externals/02_pytorch_extension_grouped_gemm.nblink",
"repo_id": "cutlass",
"token_count": 39
}
| 53 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for Conv2d tests.
"""
from cutlass_library import SubstituteTemplate
import torch
import cutlass
from cutlass_library import (
ConvKind,
ConvMode,
DataType,
DataTypeNames,
EpilogueScheduleSuffixes,
KernelScheduleSuffixes,
LayoutType,
OpcodeClassNames,
ShortDataTypeNames,
ShortLayoutTypeNames,
SplitKMode,
)
from cutlass.shape import Conv2DProblemSize
from cutlass.utils.datatypes import numpy_type, torch_type
from conv2d_problem_sizes import TestbedConv2dProblemSizes
def get_name_conv2d(
arch,
conv_kind,
element,
element_accumulator,
element_output,
opclass,
threadblock_shape,
warp_count,
instruction_shape,
stages,
iterator_algorithm,
swizzle,
split_k_mode,
split_k_slices,
activation
):
"""
Generates a procedural name for a test case for conv2d
:param arch: compute capability of kernel being generated
:type arch: int
:param conv_kind: the convolution type (i.e. fprop, dgrad, wgrad)
:type conv_kind: str
:param iterator_algorithm: the iterator algorithm applied
:type iterator_algorithm: cutlass_library.library.IteratorAlgorithm
:param element_a: data type of operand A
:param element_b: data type of operand B
:param element_c: data type of operand C
:param element_accumulator: data type used in accumulation
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass.OpcodeClass
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param stride_support: stride support of dgrad
:param alignment: int
:type alignment: int
:return: str
"""
if iterator_algorithm is None:
iterator_algorithm = "AUTO"
if swizzle is None:
swizzle = 1
name_format = "test_SM${arch}_Device_Conv2d_${conv_kind}_${iter_alg}_ImplicitGemm_${eA}nhwc_${eB}nhwc_${eC}nhwc_${opclass}_${acc}_${tbM}x${tbN}x${tbK}_${wM}x${wN}x${wK}_${IM}${IN}${IK}_stage${stages}_swizzle${swizzle}_${split_k_mode}${split_k_slices}_${activation}"
return SubstituteTemplate(
name_format,
{
"arch": str(arch),
"conv_kind": conv_kind,
"iter_alg": iterator_algorithm,
"eA": DataTypeNames[element],
"eB": DataTypeNames[element],
"eC": DataTypeNames[element_output],
"opclass": opclass,
"acc": DataTypeNames[element_accumulator],
"tbM": str(threadblock_shape[0]),
"tbN": str(threadblock_shape[1]),
"tbK": str(threadblock_shape[2]),
"wM": str(threadblock_shape[0] // warp_count[0]),
"wN": str(threadblock_shape[1] // warp_count[1]),
"wK": str(threadblock_shape[2] // warp_count[2]),
"IM": str(instruction_shape[0]),
"IN": str(instruction_shape[1]),
"IK": str(instruction_shape[2]),
"stages": str(stages),
"swizzle": str(swizzle),
"split_k_mode": split_k_mode,
"split_k_slices": str(split_k_slices),
"activation": activation
}
)
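# Example of the kind of name this produces (all values are illustrative, not a real test case):
#   test_SM80_Device_Conv2d_fprop_AUTO_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensorop_f32_128x128x64_64x64x64_16816_stage3_swizzle1_serial1_identity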
def conv2d_few_channel_problemsizes(channels):
problem_sizes = [
Conv2DProblemSize(
1, 8, 8, channels,
16, 3, 3, channels,
1, 1,
2, 2,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 16, 16, channels,
16, 3, 3, channels,
1, 1,
2, 2,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 16, 16, channels,
16, 7, 7, channels,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 224, 224, channels,
32, 7, 7, channels,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 224, 224, channels,
64, 7, 7, channels,
1, 1,
2, 2,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 224, 224, channels,
64, 5, 5, channels,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 224, 224, channels,
64, 5, 5, channels,
1, 1,
2, 2,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
]
return problem_sizes
def validate_problem_size(ps, conv_kind, split_k_slices):
P = (ps.H + 2 * ps.pad_h - ps.dilation_h * (ps.R - 1) - 1) // ps.stride_h + 1
Q = (ps.W + 2 * ps.pad_w - ps.dilation_w * (ps.S - 1) - 1) // ps.stride_w + 1
if P != ps.P or Q != ps.Q:
return False
# Split-K (serial or parallel) is not supported for strided dgrad
if conv_kind == "dgrad" and split_k_slices > 1 and (ps.stride_h > 1 or ps.stride_w > 1):
return False
return True
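# Worked example of the P/Q check (hypothetical sizes): with H = W = 8, pad = 1,
# dilation = 1, R = S = 3, and stride = 2,
#   P = (8 + 2*1 - 1*(3 - 1) - 1) // 2 + 1 = 4, and likewise Q = 4.
# A problem size whose stored P or Q disagrees with these values is rejected.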
class Conv2dLauncherFrontend:
def __init__(self, plan: cutlass.Conv2d, seed: int = 80, backend="numpy"):
self.operation = plan
self.conv_kind = plan.conv_kind
self.seed = seed
self.backend = backend
self.dtype_A = plan._element_a
self.dtype_B = plan._element_b
self.dtype_C = plan._element_c
self.dtype_acc = plan._element_accumulator
self.layout_A = LayoutType.TensorNHWC
self.layout_B = LayoutType.TensorNHWC
self.layout_C = LayoutType.TensorNHWC
self.layout_D = LayoutType.TensorNHWC
self.element_compute = DataType.f32
if self.dtype_A in [cutlass.DataType.f16, cutlass.DataType.bf16]:
self.rand_max = 1
else:
self.rand_max = 4
self.activation = plan.activation
def uniform_init(self, size, dtype):
tensor = torch.ceil(
torch.empty(size=size, dtype=torch_type(dtype), device="cuda").uniform_(-self.rand_max - 0.5, self.rand_max - 0.5)
).to(memory_format=torch.channels_last)
return tensor
def reference(self, ps, A, B, C, alpha, beta, activation):
if self.conv_kind == ConvKind.Fprop:
torch_result = alpha * torch.ops.aten.conv2d(
A,
B,
stride=(ps.stride_h, ps.stride_w),
padding=(ps.pad_h, ps.pad_w),
dilation=(ps.dilation_h, ps.dilation_w)
) + beta * C
elif self.conv_kind == ConvKind.Dgrad:
torch_result = alpha * torch.nn.grad.conv2d_input(
(ps.N, ps.C, ps.H, ps.W),
B,
A,
padding=(ps.pad_h, ps.pad_w),
stride=(ps.stride_h, ps.stride_w)
) + beta * C
elif self.conv_kind == ConvKind.Wgrad:
torch_result = alpha * torch.nn.grad.conv2d_weight(
B,
(ps.K, ps.C, ps.R, ps.S),
A,
padding=(ps.pad_h, ps.pad_w),
stride=(ps.stride_h, ps.stride_w)
) + beta * C
else:
raise Exception(f"Conv kind {self.conv_kind} is currently unsupported.")
if activation == cutlass.backend.epilogue.relu:
torch_result = torch.nn.functional.relu(torch_result)
elif activation == cutlass.backend.epilogue.leaky_relu:
torch_result = torch.nn.functional.leaky_relu(torch_result, 0.5)
return torch_result
def run(self, ps, split_k_mode=SplitKMode.Serial, split_k_slices=1, alpha=1.0, beta=0.0):
if self.conv_kind == ConvKind.Fprop:
tensor_A_size = (ps.N, ps.C, ps.H, ps.W)
tensor_B_size = (ps.K, ps.C, ps.R, ps.S)
tensor_C_size = (ps.N, ps.K, ps.P, ps.Q)
elif self.conv_kind == ConvKind.Dgrad:
tensor_A_size = (ps.N, ps.K, ps.P, ps.Q)
tensor_B_size = (ps.K, ps.C, ps.R, ps.S)
tensor_C_size = (ps.N, ps.C, ps.H, ps.W)
elif self.conv_kind == ConvKind.Wgrad:
tensor_A_size = (ps.N, ps.K, ps.P, ps.Q)
tensor_B_size = (ps.N, ps.C, ps.H, ps.W)
tensor_C_size = (ps.K, ps.C, ps.R, ps.S)
else:
raise Exception(f"Conv kind {self.conv_kind} is not supported")
torch.manual_seed(self.seed)
tensor_A = self.uniform_init(size=tensor_A_size, dtype=self.dtype_A)
tensor_B = self.uniform_init(size=tensor_B_size, dtype=self.dtype_B)
tensor_C = self.uniform_init(size=tensor_C_size, dtype=self.dtype_C)
tensor_D = torch.zeros_like(tensor_C).to(memory_format=torch.channels_last)
args = self.operation.run(tensor_A, tensor_B, tensor_C, tensor_D,
stride=(ps.stride_h, ps.stride_w),
padding=(ps.pad_h, ps.pad_w),
dilation=(ps.dilation_h, ps.dilation_w),
alpha=alpha, beta=beta,
split_k=(split_k_mode, split_k_slices))
args.sync()
tensor_D_ref = self.reference(ps, tensor_A, tensor_B, tensor_C, alpha, beta, self.activation)
torch.cuda.synchronize()
passed = torch.allclose(tensor_D, tensor_D_ref, atol=2e-06)
return passed
def add_test(
cls,
cc,
conv_kind,
problem_sizes,
element,
element_accumulator,
element_output,
opclass,
threadblock_shape,
warp_count,
instruction_shape,
stages,
iterator_algorithm=None,
swizzle=None,
split_k_mode="serial",
split_k_slices=1,
activation = "identity"
):
"""Create a test-running function with the given specification"""
test_name = get_name_conv2d(
cc, conv_kind, element, element_accumulator,
element_output, opclass, threadblock_shape, warp_count, instruction_shape, stages,
iterator_algorithm, swizzle, split_k_mode, split_k_slices, activation)
def run(self):
# Create the plan
plan = cutlass.Conv2d(
kind=conv_kind,
element=element,
element_accumulator=element_accumulator,
element_C=element_output,
element_D=element_output
)
# Set the opclass
plan.opclass = opclass
# Set the tile description
td = {
"threadblock_shape": threadblock_shape,
"warp_count": warp_count,
"stages": stages,
"instruction_shape": instruction_shape,
}
plan.tile_description = td
# Set iterator algorithm
if iterator_algorithm is not None:
plan.iterator_algorithm = iterator_algorithm
# Set swizzling functor
if swizzle is not None:
plan.swizzling_stride = swizzle
if activation != "identity":
if activation == "leaky_relu":
plan.activation = (cutlass.epilogue.leaky_relu, 0.5)
else:
plan.activation = getattr(cutlass.epilogue, activation)
conv2d_launcher = Conv2dLauncherFrontend(plan, 80, backend="torch")
for ps in problem_sizes:
if not validate_problem_size(ps, conv_kind, split_k_slices):
continue
self.assertTrue(conv2d_launcher.run(ps, split_k_mode, split_k_slices, 1.0, 2.0))
setattr(cls, test_name, run)
return run
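# Usage sketch (hypothetical test class and parameter choices; real suites pass values from
# their own configuration):
#   class Conv2dF16Sm80(unittest.TestCase):
#       pass
#   add_test(Conv2dF16Sm80, 80, "fprop", get_conv_problems(),
#            cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
#            cutlass.OpcodeClass.TensorOp, [128, 128, 64], [2, 2, 1], [16, 8, 16], 3,
#            activation="relu")
#   # A test method is attached to Conv2dF16Sm80 under the procedural name from get_name_conv2d.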
def get_conv_problems():
# 64: minimum channel size
conv_problems = TestbedConv2dProblemSizes(64).all
# Insert alignment 4 & 2 tests
conv_problems += [
Conv2DProblemSize(
1, 4, 4, 12,
8, 3, 3, 12,
0, 0,
3, 3,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 4, 4, 14,
8, 3, 3, 14,
0, 0,
3, 3,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 23, 56, 98,
128, 3, 3, 98,
4, 5,
3, 3,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
]
return conv_problems
|
cutlass/test/python/cutlass/conv2d/conv2d_test_utils.py/0
|
{
"file_path": "cutlass/test/python/cutlass/conv2d/conv2d_test_utils.py",
"repo_id": "cutlass",
"token_count": 6885
}
| 54 |