Unverified commit e17c0d80 authored by Qianfeng, committed by GitHub

Reduction in Composable Kernel (#82)



* Initial adding of generic reduction

* Initial adding of generic reduction ...

* Updates to make compiling done

* clang-format all files

* clang-format some files again

* Renaming in profiler/include/profile_reduce.hpp

* Updates and make BlockWise cases passed

* Updates and make ThreadWise and MultiBlockTwoCall cases passed

* Remove the support for MUL and NORM1 reduceOp from the profiler and the device instances

* Change to replace the dim0_max_vector_size/dim1_max_vector_size template argument in the device reduce classes

* format

* adding pooling

* added max and average pooling

* comment out cout and kernel timing

* Tiny simplification in profiler/reduce_profiler.cpp

* Add example for reduce_blockwise

* Tiny updates

* Change to pass the ElementWiseOp from device layer to kernel

* Fix the vectorDim and vectorSize in Device layer

* Enable vector load on both dim0 and dim1 for Threadwise method

* Tiny updates

* Change to let the user pass the preUnaryOp and posUnaryOp

* Make pooling example work

* split device_reduce_instance into two libraries

* Tiny update

* Replace nanPropaOpt enum by boolean propagate_nan

* Simplification in DeviceReduce layer codes

* update build

* Change to clarify the difference between ck::half_t and half_float::half

* Renaming in all the reduction codes

* Add VectorSize as template parameter for device layer

* Add BetaIsZero as kernel template and as AccDataType for alpha

* print

* Small updates for pooling

* Updates for host_generic_reduction for reference

* Update to make AVG pooling pass

* Update to make MAX pooling with indices output pass

* fix

* add OutDst vector store to threadwise reduction and pooling

* tweak

* turn off check_indices that caused build issue

* refactor pooling

* clean up

* turn off check_indices due to a build issue with php-compiler

* add more tile size for odd C

* tweak conv for odd C

* update script

* clean up elementwise op

* add hack in reduction_operator.hpp to avoid a compile error; to fix it properly, element_wise_op needs to be used in the reduction op

* Add OutVectorSize as device and kernel tunable, also update to Elementwise Operations

* Move reduce operator mapping to host layer file reduction_operator_mapping.hpp from reduction_operator.hpp

* Change to the unary operators

* Move the definitions of unary operations to element_wise_operation.hpp

* re-org files

* Refine in device interfaces and multiblock kernels

* Split the reduction configurations into instances for specific methods

* Update in getTypeString() of device pool2d

* Renaming in host and kernel

* Tiny update in profiler/src/profiler.cpp

* Uncomment in device_operation/CMakeLists.txt to enable the building of all operations

* Make check_indices a templated function to remove some linking issue

* Renaming in the profiler reduce module

* Add support for double Reduction (but disable MultiblockAtomicAdd for double)

* Tiny correction of literal string

* Rename DevicePoolFwd to DevicePool2dFwd

* Split device_reduce_instance_xxx.cpp files according to the data types to speed up compiling

* Add comments for lists of configurations, lists of instances and references of add_reduce_instances_xxx

* Remove un-used header file gridwise_generic_reduction_wrapper_common.hpp

* Renaming and refining in the Reduction codes

* Tiny change in the unary operators

* Renaming symbols and files

* Renaming symbols in the kernels

* Move kernel kernel_set_buffer_value to separate file

* Add IndexDataType template parameter for kernels and use int32_t as index data type in device layer

* Tiny update in the kernels

* Remove definition of sqrtf()/isnan()/abs() for half_t due to some ADL issue

* Simplify a helper function in device layer

* Tiny adjustment in testing data initialization

* Renaming in kernel/device/host

* Add two testing scripts for reduction

* Refine the Unary operators in element_wise_operation.hpp

* Update in the reduce profiler module

* Update to the reduction testing scripts

* reduce compile parallelism

* change CI docker to rocm5.0

* remove unused variables

* fix build
Co-authored-by: Chao Liu <chao.liu2@amd.com>
parent 12dfba3d
FROM ubuntu:18.04
-ARG ROCMVERSION=4.3.1
+ARG ROCMVERSION=5.0
ARG OSDB_BKC_VERSION
RUN set -xe
......
......
@@ -175,6 +175,161 @@ struct RequantReluRequant
float scaleRelu_;
};
// Unary operators are usually applied element-wise before/after the reduction is executed on the
// elements. They make it easy to implement reduction types such as AVG, NRM1 and NRM2 (see the
// usage sketch at the end of this section).
template <typename Y, typename X, bool HasDividing = false>
struct UnaryIdentic;
template <>
struct UnaryIdentic<float, float, false>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(float& y, const float& x) const { y = x; };
};
template <>
struct UnaryIdentic<float, float, true>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { divider_ = divider; };
__host__ __device__ void operator()(float& y, const float& x) const
{
y = x / type_convert<float>(divider_);
};
int32_t divider_ = 1;
};
template <>
struct UnaryIdentic<half_t, half_t, false>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(half_t& y, const half_t& x) const { y = x; };
};
template <>
struct UnaryIdentic<double, double, false>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(double& y, const double& x) const { y = x; };
};
template <>
struct UnaryIdentic<double, double, true>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { divider_ = divider; };
__host__ __device__ void operator()(double& y, const double& x) const
{
y = x / type_convert<double>(divider_);
};
int32_t divider_ = 1;
};
template <>
struct UnaryIdentic<int32_t, int32_t, false>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(int32_t& y, const int32_t& x) const { y = x; };
};
template <typename Y, typename X, bool HasDividing = false>
struct UnarySquare;
template <>
struct UnarySquare<float, float, false>
{
__host__ __device__ UnarySquare(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(float& y, const float& x) const { y = x * x; };
};
template <>
struct UnarySquare<float, float, true>
{
__host__ __device__ UnarySquare(const int32_t divider = 1) { divider_ = divider; };
__host__ __device__ void operator()(float& y, const float& x) const
{
y = x * x / type_convert<float>(divider_);
};
int32_t divider_ = 1;
};
template <>
struct UnarySquare<double, double, false>
{
__host__ __device__ UnarySquare(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(double& y, const double& x) const { y = x * x; };
};
template <>
struct UnarySquare<double, double, true>
{
__host__ __device__ UnarySquare(const int32_t divider = 1) { divider_ = divider; };
__host__ __device__ void operator()(double& y, const double& x) const
{
y = x * x / type_convert<double>(divider_);
};
int32_t divider_ = 1;
};
template <typename Y, typename X>
struct UnaryAbs;
template <>
struct UnaryAbs<float, float>
{
__host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(float& y, const float& x) const { y = abs(x); };
};
template <>
struct UnaryAbs<half_t, half_t>
{
__host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(half_t& y, const half_t& x) const { y = __habs(x); };
};
template <>
struct UnaryAbs<double, double>
{
__host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(double& y, const double& x) const { y = abs(x); };
};
template <typename Y, typename X>
struct UnarySqrt;
template <>
struct UnarySqrt<float, float>
{
__host__ __device__ UnarySqrt(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(float& y, const float& x) const { y = sqrtf(x); };
};
template <>
struct UnarySqrt<double, double>
{
__host__ __device__ UnarySqrt(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(double& y, const double& x) const { y = sqrt(x); };
};
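// Usage sketch (illustrative only, not part of this header): a NRM2 reduction composes
// UnarySquare as the pre-reduction (in) operator and UnarySqrt as the post-reduction (acc)
// operator around a SUM accumulation, while AVG uses UnaryIdentic with HasDividing = true
// so that the accumulated sum is divided by the reduce length. The function name below is
// hypothetical and only demonstrates how the operators are called.
inline float example_nrm2_of_three(float x0, float x1, float x2)
{
    UnarySquare<float, float, false> pre_op{};
    UnarySqrt<float, float> post_op{};
    const float values[] = {x0, x1, x2};
    float acc = 0.0f;
    for(float x : values)
    {
        float squared;
        pre_op(squared, x); // element-wise pre-reduction operation
        acc += squared;     // SUM accumulation
    }
    float result;
    post_op(result, acc); // element-wise post-reduction operation
    return result;
}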
} // namespace element_wise
} // namespace tensor_operation
} // namespace ck
......
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_GRIDWISE_2D_REDUCTION_BLOCKWISE_HPP
#define CK_GRIDWISE_2D_REDUCTION_BLOCKWISE_HPP
#include "data_type.hpp"
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_accumulate.hpp"
#include "reduction_functions_blockwise.hpp"
#include "threadwise_tensor_slice_transfer.hpp"
namespace ck {
template <typename GridwiseReduction,
bool NeedIndices,
typename InDataType,
typename OutDataType,
typename AccDataType,
typename IndexDataType,
typename InGridDesc_M_K,
typename OutGridDesc_M,
typename InElementwiseOperation,
typename OutElementwiseOperation>
__global__ void kernel_reduce_blockwise(const InGridDesc_M_K in_grid_desc_m_k,
const OutGridDesc_M out_grid_desc_m,
const InElementwiseOperation in_elementwise_op,
const OutElementwiseOperation acc_elementwise_op,
AccDataType alpha,
const InDataType* const __restrict__ p_in_global,
OutDataType beta,
OutDataType* const __restrict__ p_out_global,
const IndexDataType* const __restrict__ p_ws_indices_global,
IndexDataType* const __restrict__ p_indices_global)
{
if constexpr(!NeedIndices)
{
GridwiseReduction::Run(in_grid_desc_m_k,
out_grid_desc_m,
in_elementwise_op,
acc_elementwise_op,
alpha,
p_in_global,
beta,
p_out_global,
p_ws_indices_global,
p_indices_global);
}
else
{
GridwiseReduction::RunWithIndex(in_grid_desc_m_k,
out_grid_desc_m,
in_elementwise_op,
acc_elementwise_op,
alpha,
p_in_global,
beta,
p_out_global,
p_ws_indices_global,
p_indices_global);
};
};
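// Reference-semantics sketch (host-side, illustrative only): for every invariant index m of an
// M x K input, the blockwise kernel effectively computes
//   out[m] = alpha * acc_op(reduce_op_{k}(in_op(in[m][k]))) + beta * out[m]
// The sketch below assumes a SUM-like reduce_op written as plain addition; the real kernel
// tiles K across the thread block and reduces partial values through LDS.
template <typename InOp, typename AccOp>
void reference_reduce_mk_to_m(const float* in,
                              float* out,
                              int M,
                              int K,
                              float alpha,
                              float beta,
                              InOp in_op,
                              AccOp acc_op)
{
    for(int m = 0; m < M; ++m)
    {
        float acc = 0.0f;
        for(int k = 0; k < K; ++k)
        {
            float v = in[m * K + k];
            in_op(v, v); // element-wise pre-reduction operation
            acc += v;    // reduce_op (SUM in this sketch)
        }
        acc_op(acc, acc); // element-wise post-reduction operation
        out[m] = alpha * acc + beta * out[m];
    }
}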
template <typename GridwiseReduction,
bool NeedIndices,
typename InDataType,
typename OutDataType,
typename AccDataType,
typename IndexDataType,
typename InGridDesc_M_K,
typename OutGridDesc_M,
typename InElementwiseOperation,
typename OutElementwiseOperation>
__global__ void
kernel_reduce_blockwise_second_call(const InGridDesc_M_K in_grid_desc_m_k,
const OutGridDesc_M out_grid_desc_m,
const InElementwiseOperation in_elementwise_op,
const OutElementwiseOperation acc_elementwise_op,
AccDataType alpha,
const InDataType* const __restrict__ p_in_global,
OutDataType beta,
OutDataType* const __restrict__ p_out_global,
const IndexDataType* const __restrict__ p_ws_indices_global,
IndexDataType* const __restrict__ p_indices_global)
{
if constexpr(!NeedIndices)
{
GridwiseReduction::Run(in_grid_desc_m_k,
out_grid_desc_m,
in_elementwise_op,
acc_elementwise_op,
alpha,
p_in_global,
beta,
p_out_global,
p_ws_indices_global,
p_indices_global);
}
else
{
GridwiseReduction::RunSecondCallWithIndex(in_grid_desc_m_k,
out_grid_desc_m,
in_elementwise_op,
acc_elementwise_op,
alpha,
p_in_global,
beta,
p_out_global,
p_ws_indices_global,
p_indices_global);
};
};
template <typename InDataType,
typename OutDataType,
typename AccDataType,
typename IndexDataType,
typename InGridDesc_M_K,
typename OutGridDesc_M,
typename ReduceOperation,
typename InElementwiseOperation,
typename OutElementwiseOperation,
bool PropagateNan,
bool BetaIsZero,
index_t BlockSize,
index_t MThreadClusterSize,
index_t KThreadClusterSize,
index_t MThreadSliceSize,
index_t KThreadSliceSize,
index_t InSrcVectorDim,
index_t InSrcVectorSize,
index_t OutDstVectorSize>
struct GridwiseReduction_mk_to_m_blockwise
{
static constexpr bool reorder_thread_cluster = (InSrcVectorDim == 0);
static constexpr auto buffer_1d_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<BlockSize>{}));
template <typename T>
using PassThroughOp = tensor_operation::element_wise::UnaryIdentic<T, T>;
static constexpr auto I0 = Number<0>{};
static constexpr index_t M_BlockTileSize = MThreadClusterSize * MThreadSliceSize;
static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;
__device__ static void Run(const InGridDesc_M_K& in_grid_desc_m_k,
const OutGridDesc_M& out_grid_desc_m,
const InElementwiseOperation& in_elementwise_op,
const OutElementwiseOperation& acc_elementwise_op,
AccDataType alpha,
const InDataType* const __restrict__ p_in_global,
OutDataType beta,
OutDataType* const __restrict__ p_out_global,
const IndexDataType* const __restrict__ p_ws_indices_global,
IndexDataType* const __restrict__ p_indices_global)
{
using BlockwiseReduce = PartitionedBlockwiseReductionOn1dBuffer<decltype(buffer_1d_desc),
AccDataType,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
reorder_thread_cluster,
ReduceOperation,
PropagateNan>;
using Accumulation =
detail::AccumulateWithNanCheck<PropagateNan, ReduceOperation, AccDataType>;
(void)p_ws_indices_global;
(void)p_indices_global;
// LDS
__shared__ AccDataType p_block_reduce_buffer[BlockSize];
const auto zeroVal = ReduceOperation::GetReductionZeroVal();
const auto in_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_in_global, in_grid_desc_m_k.GetElementSpaceSize(), type_convert<InDataType>(zeroVal));
auto out_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_out_global, out_grid_desc_m.GetElementSpaceSize());
auto block_reduce_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_block_reduce_buffer, BlockSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr,
AccDataType,
MThreadSliceSize * KThreadSliceSize,
true>
in_thread_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, AccDataType, MThreadSliceSize, true> accu_value_buf;
static_for<0, MThreadSliceSize, 1>{}([&](auto I) { accu_value_buf(I) = zeroVal; });
const auto toReduceLength = in_grid_desc_m_k.GetLength(Number<1>{});
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_1d_id = get_block_1d_id();
const index_t thread_m_cluster_id =
reorder_thread_cluster ? thread_local_id % MThreadClusterSize
: ((thread_local_id / KThreadClusterSize) % MThreadClusterSize);
const index_t thread_k_cluster_id =
reorder_thread_cluster ? ((thread_local_id / MThreadClusterSize) % KThreadClusterSize)
: thread_local_id % KThreadClusterSize;
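        // Example mapping (illustrative): with MThreadClusterSize = 8, KThreadClusterSize = 32
        // and reorder_thread_cluster == false, thread 70 gets thread_k_cluster_id = 70 % 32 = 6
        // and thread_m_cluster_id = (70 / 32) % 8 = 2; with reorder_thread_cluster == true
        // (InSrcVectorDim == 0) the mapping is swapped so the M cluster index varies fastest.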
using ThreadBufferLengths = Sequence<MThreadSliceSize, KThreadSliceSize>;
constexpr auto thread_buffer_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<
InDataType,
AccDataType,
InGridDesc_M_K,
decltype(thread_buffer_desc),
ThreadBufferLengths,
typename conditional<InSrcVectorDim == 0, Sequence<1, 0>, Sequence<0, 1>>::type,
InSrcVectorDim,
InSrcVectorSize,
1,
false>(in_grid_desc_m_k,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
thread_k_cluster_id * KThreadSliceSize));
constexpr auto in_thread_copy_step = make_multi_index(0, K_BlockTileSize);
const index_t toReduceTiles = (toReduceLength + K_BlockTileSize - 1) / K_BlockTileSize;
index_t reducedTiles = 0;
do
{
threadwise_src_load.Run(in_grid_desc_m_k,
in_global_buf,
thread_buffer_desc,
make_tuple(I0, I0),
in_thread_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
// do element-wise pre-reduction operation
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
in_elementwise_op(in_thread_buf(offset), in_thread_buf(offset));
});
// reduce on each thread-local slice
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
Accumulation::Calculate(accu_value_buf(I), in_thread_buf[offset]);
});
});
threadwise_src_load.MoveSrcSliceWindow(in_grid_desc_m_k, in_thread_copy_step);
reducedTiles++;
} while(reducedTiles < toReduceTiles);
constexpr auto reduced_data_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{}));
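        // Per-thread partial values are staged into LDS (one slot per thread of the block) and
        // reduced across the K cluster dimension; this is done once per M slice index because
        // the LDS buffer only holds BlockSize values.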
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
if constexpr(reorder_thread_cluster)
{
block_reduce_buf(thread_k_cluster_id * MThreadClusterSize + thread_m_cluster_id) =
accu_value_buf[I];
}
else
block_reduce_buf(thread_m_cluster_id * KThreadClusterSize + thread_k_cluster_id) =
accu_value_buf[I];
accu_value_buf(I) = zeroVal;
__syncthreads();
BlockwiseReduce::Reduce(
block_reduce_buf, accu_value_buf(I), thread_m_cluster_id, thread_k_cluster_id);
});
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
if(thread_k_cluster_id == 0)
{
acc_elementwise_op(accu_value_buf(I), accu_value_buf(I));
accu_value_buf(I) *= alpha;
}
});
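        // The final output is out[m] = alpha * reduced_value + beta * prior_out[m]; the prior
        // destination values are loaded below only when BetaIsZero is false and beta is non-zero.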
if(thread_k_cluster_id == 0)
{
if constexpr(!BetaIsZero)
{
if(!float_equal_zero{}(beta))
{
StaticBuffer<AddressSpaceEnum_t::Vgpr, OutDataType, MThreadSliceSize, true>
priorDstValueBuf;
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<OutDataType,
OutDataType,
OutGridDesc_M,
decltype(reduced_data_desc),
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
1,
false>(
out_grid_desc_m,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize));
threadwise_dst_load.Run(out_grid_desc_m,
out_global_buf,
reduced_data_desc,
make_tuple(I0),
priorDstValueBuf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
accu_value_buf(I) += type_convert<AccDataType>(priorDstValueBuf[I] * beta);
});
};
};
auto threadwise_dst_store =
ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
OutDataType,
decltype(reduced_data_desc),
OutGridDesc_M,
PassThroughOp<AccDataType>,
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
InMemoryDataOperationEnum_t::Set,
1,
true>(
out_grid_desc_m,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize),
PassThroughOp<AccDataType>{});
threadwise_dst_store.Run(
reduced_data_desc, make_tuple(I0), accu_value_buf, out_grid_desc_m, out_global_buf);
}
};
__device__ static void RunWithIndex(const InGridDesc_M_K& in_grid_desc_m_k,
const OutGridDesc_M& out_grid_desc_m,
const InElementwiseOperation& in_elementwise_op,
const OutElementwiseOperation& acc_elementwise_op,
AccDataType alpha,
const InDataType* const __restrict__ p_in_global,
OutDataType beta,
OutDataType* const __restrict__ p_out_global,
const IndexDataType* const __restrict__ p_ws_indices_global,
IndexDataType* const __restrict__ p_indices_global)
{
using BlockwiseReduceWithIndex =
PartitionedBlockwiseReductionWithIndexOn1dBuffer<decltype(buffer_1d_desc),
AccDataType,
IndexDataType,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
reorder_thread_cluster,
ReduceOperation,
PropagateNan>;
using AccumulationWithIndex = detail::AccumulateWithIndexAndNanCheck<PropagateNan,
ReduceOperation,
AccDataType,
IndexDataType>;
(void)p_ws_indices_global;
// LDS
__shared__ AccDataType p_block_reduce_val_buffer[BlockSize];
__shared__ IndexDataType p_block_reduce_idx_buffer[BlockSize];
const auto zeroVal = ReduceOperation::GetReductionZeroVal();
const auto in_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_in_global, in_grid_desc_m_k.GetElementSpaceSize(), type_convert<InDataType>(zeroVal));
auto out_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_out_global, out_grid_desc_m.GetElementSpaceSize());
auto out_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_indices_global, out_grid_desc_m.GetElementSpaceSize());
auto block_reduce_val_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_block_reduce_val_buffer, BlockSize);
auto block_reduce_idx_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_block_reduce_idx_buffer, BlockSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr,
AccDataType,
MThreadSliceSize * KThreadSliceSize,
true>
in_thread_val_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, index_t, MThreadSliceSize * KThreadSliceSize, true>
in_thread_idx_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, AccDataType, MThreadSliceSize, true> accu_value_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, IndexDataType, MThreadSliceSize, true>
accu_index_buf;
const auto toReduceLength = in_grid_desc_m_k.GetLength(Number<1>{});
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_1d_id = get_block_1d_id();
const index_t thread_m_cluster_id =
reorder_thread_cluster ? thread_local_id % MThreadClusterSize
: ((thread_local_id / KThreadClusterSize) % MThreadClusterSize);
const index_t thread_k_cluster_id =
reorder_thread_cluster ? ((thread_local_id / MThreadClusterSize) % KThreadClusterSize)
: thread_local_id % KThreadClusterSize;
using ThreadBufferLengths = Sequence<MThreadSliceSize, KThreadSliceSize>;
constexpr auto thread_buffer_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<
InDataType,
AccDataType,
InGridDesc_M_K,
decltype(thread_buffer_desc),
ThreadBufferLengths,
typename conditional<InSrcVectorDim == 0, Sequence<1, 0>, Sequence<0, 1>>::type,
InSrcVectorDim,
InSrcVectorSize,
1,
false>(in_grid_desc_m_k,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
thread_k_cluster_id * KThreadSliceSize));
index_t indexOffset = 0;
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
accu_value_buf(I) = zeroVal;
accu_index_buf(I) = 0;
});
constexpr auto in_thread_copy_step = make_multi_index(0, K_BlockTileSize);
const index_t toReduceTiles = (toReduceLength + K_BlockTileSize - 1) / K_BlockTileSize;
index_t reducedTiles = 0;
do
{
// load the thread slice
threadwise_src_load.Run(in_grid_desc_m_k,
in_global_buf,
thread_buffer_desc,
make_tuple(I0, I0),
in_thread_val_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
// initialize the indices for the per-thread to-reduce values
in_thread_idx_buf(offset) =
indexOffset + thread_k_cluster_id * KThreadSliceSize + J();
// do element-wise pre-reduction operation
in_elementwise_op(in_thread_val_buf(offset), in_thread_val_buf(offset));
});
AccDataType tmpValue = zeroVal;
IndexDataType tmpIndex = 0;
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
// reduce on the dim1 thread slice
AccumulationWithIndex::Calculate(
tmpValue, in_thread_val_buf[offset], tmpIndex, in_thread_idx_buf[offset]);
});
// store thread local value to LDS for parallel reduction
if constexpr(reorder_thread_cluster)
{
block_reduce_val_buf(thread_k_cluster_id * MThreadClusterSize +
thread_m_cluster_id) = tmpValue;
block_reduce_idx_buf(thread_k_cluster_id * MThreadClusterSize +
thread_m_cluster_id) = tmpIndex;
}
else
{
block_reduce_val_buf(thread_m_cluster_id * KThreadClusterSize +
thread_k_cluster_id) = tmpValue;
block_reduce_idx_buf(thread_m_cluster_id * KThreadClusterSize +
thread_k_cluster_id) = tmpIndex;
}
__syncthreads();
BlockwiseReduceWithIndex::Reduce(block_reduce_val_buf,
block_reduce_idx_buf,
tmpValue,
tmpIndex,
thread_m_cluster_id,
thread_k_cluster_id);
AccumulationWithIndex::Calculate(
accu_value_buf(I), tmpValue, accu_index_buf(I), tmpIndex);
});
threadwise_src_load.MoveSrcSliceWindow(in_grid_desc_m_k, in_thread_copy_step);
indexOffset += K_BlockTileSize;
reducedTiles++;
} while(reducedTiles < toReduceTiles);
constexpr auto reduced_data_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{}));
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
if(thread_k_cluster_id == 0)
{
                // for indexed operations, acc_elementwise_op should do nothing
acc_elementwise_op(accu_value_buf(I), accu_value_buf(I));
accu_value_buf(I) *= alpha;
}
});
if(thread_k_cluster_id == 0)
{
if constexpr(!BetaIsZero)
{
if(!float_equal_zero{}(beta))
{
StaticBuffer<AddressSpaceEnum_t::Vgpr, OutDataType, MThreadSliceSize, true>
priorDstValueBuf;
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<OutDataType,
OutDataType,
OutGridDesc_M,
decltype(reduced_data_desc),
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
1,
false>(
out_grid_desc_m,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize));
threadwise_dst_load.Run(out_grid_desc_m,
out_global_val_buf,
reduced_data_desc,
make_tuple(I0),
priorDstValueBuf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
accu_value_buf(I) += type_convert<AccDataType>(priorDstValueBuf[I] * beta);
});
};
};
auto threadwise_dst_val_store =
ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
OutDataType,
decltype(reduced_data_desc),
OutGridDesc_M,
PassThroughOp<AccDataType>,
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
InMemoryDataOperationEnum_t::Set,
1,
false>(
out_grid_desc_m,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize),
PassThroughOp<AccDataType>{});
auto threadwise_dst_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<IndexDataType,
IndexDataType,
decltype(reduced_data_desc),
OutGridDesc_M,
PassThroughOp<index_t>,
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
InMemoryDataOperationEnum_t::Set,
1,
false>(
out_grid_desc_m,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize),
PassThroughOp<index_t>{});
threadwise_dst_val_store.Run(reduced_data_desc,
make_tuple(I0),
accu_value_buf,
out_grid_desc_m,
out_global_val_buf);
threadwise_dst_idx_store.Run(reduced_data_desc,
make_tuple(I0),
accu_index_buf,
out_grid_desc_m,
out_global_idx_buf);
}
};
__device__ static void
RunSecondCallWithIndex(const InGridDesc_M_K& in_grid_desc_m_k,
const OutGridDesc_M& out_grid_desc_m,
const InElementwiseOperation in_elementwise_op,
const OutElementwiseOperation acc_elementwise_op,
AccDataType alpha,
const InDataType* const __restrict__ p_ws_values_global,
OutDataType beta,
OutDataType* const __restrict__ p_out_global,
const IndexDataType* const __restrict__ p_ws_indices_global,
IndexDataType* const __restrict__ p_indices_global)
{
using BlockwiseReduceWithIndex =
PartitionedBlockwiseReductionWithIndexOn1dBuffer<decltype(buffer_1d_desc),
AccDataType,
IndexDataType,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
reorder_thread_cluster,
ReduceOperation,
PropagateNan>;
using AccumulationWithIndex = detail::AccumulateWithIndexAndNanCheck<PropagateNan,
ReduceOperation,
AccDataType,
IndexDataType>;
(void)in_elementwise_op;
// LDS
__shared__ AccDataType p_block_reduce_val_buffer[BlockSize];
__shared__ IndexDataType p_block_reduce_idx_buffer[BlockSize];
const auto zeroVal = ReduceOperation::GetReductionZeroVal();
const auto src_global_val_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Global>(p_ws_values_global,
in_grid_desc_m_k.GetElementSpaceSize(),
type_convert<InDataType>(zeroVal));
const auto src_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_ws_indices_global, in_grid_desc_m_k.GetElementSpaceSize());
auto out_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_out_global, out_grid_desc_m.GetElementSpaceSize());
auto out_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_indices_global, out_grid_desc_m.GetElementSpaceSize());
auto block_reduce_val_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_block_reduce_val_buffer, BlockSize);
auto block_reduce_idx_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_block_reduce_idx_buffer, BlockSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr,
AccDataType,
MThreadSliceSize * KThreadSliceSize,
true>
in_thread_val_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr,
IndexDataType,
MThreadSliceSize * KThreadSliceSize,
true>
in_thread_idx_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, AccDataType, MThreadSliceSize, true> accu_value_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, IndexDataType, MThreadSliceSize, true>
accu_index_buf;
const auto toReduceLength = in_grid_desc_m_k.GetLength(Number<1>{});
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_1d_id = get_block_1d_id();
const index_t thread_m_cluster_id =
reorder_thread_cluster ? thread_local_id % MThreadClusterSize
: ((thread_local_id / KThreadClusterSize) % MThreadClusterSize);
const index_t thread_k_cluster_id =
reorder_thread_cluster ? ((thread_local_id / MThreadClusterSize) % KThreadClusterSize)
: thread_local_id % KThreadClusterSize;
using ThreadBufferLengths = Sequence<MThreadSliceSize, KThreadSliceSize>;
constexpr auto thread_buffer_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));
auto threadwise_src_val_load = ThreadwiseTensorSliceTransfer_v2<
InDataType,
AccDataType,
InGridDesc_M_K,
decltype(thread_buffer_desc),
ThreadBufferLengths,
typename conditional<InSrcVectorDim == 0, Sequence<1, 0>, Sequence<0, 1>>::type,
InSrcVectorDim,
InSrcVectorSize,
1,
false>(in_grid_desc_m_k,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
thread_k_cluster_id * KThreadSliceSize));
auto threadwise_src_idx_load = ThreadwiseTensorSliceTransfer_v2<
IndexDataType,
IndexDataType,
InGridDesc_M_K,
decltype(thread_buffer_desc),
ThreadBufferLengths,
typename conditional<InSrcVectorDim == 0, Sequence<1, 0>, Sequence<0, 1>>::type,
InSrcVectorDim,
InSrcVectorSize,
1,
false>(in_grid_desc_m_k,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
thread_k_cluster_id * KThreadSliceSize));
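        // In this second call the per-element indices are read back from the workspace written
        // by the first (partial-reduction) kernel instead of being regenerated, which is why the
        // indexOffset bookkeeping below is commented out.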
// index_t indexOffset = 0;
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
accu_value_buf(I) = zeroVal;
accu_index_buf(I) = 0;
});
constexpr auto in_thread_copy_step = make_multi_index(0, K_BlockTileSize);
const index_t toReduceTiles = (toReduceLength + K_BlockTileSize - 1) / K_BlockTileSize;
index_t reducedTiles = 0;
do
{
// load the thread slice
threadwise_src_val_load.Run(in_grid_desc_m_k,
src_global_val_buf,
thread_buffer_desc,
make_tuple(I0, I0),
in_thread_val_buf);
threadwise_src_idx_load.Run(in_grid_desc_m_k,
src_global_idx_buf,
thread_buffer_desc,
make_tuple(I0, I0),
in_thread_idx_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
AccDataType tmpValue = zeroVal;
IndexDataType tmpIndex = 0;
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
// reduce on the dim1 thread slice
AccumulationWithIndex::Calculate(
tmpValue, in_thread_val_buf[offset], tmpIndex, in_thread_idx_buf[offset]);
});
// store thread local value to LDS for parallel reduction
if constexpr(reorder_thread_cluster)
{
block_reduce_val_buf(thread_k_cluster_id * MThreadClusterSize +
thread_m_cluster_id) = tmpValue;
block_reduce_idx_buf(thread_k_cluster_id * MThreadClusterSize +
thread_m_cluster_id) = tmpIndex;
}
else
{
block_reduce_val_buf(thread_m_cluster_id * KThreadClusterSize +
thread_k_cluster_id) = tmpValue;
block_reduce_idx_buf(thread_m_cluster_id * KThreadClusterSize +
thread_k_cluster_id) = tmpIndex;
}
__syncthreads();
BlockwiseReduceWithIndex::Reduce(block_reduce_val_buf,
block_reduce_idx_buf,
tmpValue,
tmpIndex,
thread_m_cluster_id,
thread_k_cluster_id);
AccumulationWithIndex::Calculate(
accu_value_buf(I), tmpValue, accu_index_buf(I), tmpIndex);
});
threadwise_src_val_load.MoveSrcSliceWindow(in_grid_desc_m_k, in_thread_copy_step);
threadwise_src_idx_load.MoveSrcSliceWindow(in_grid_desc_m_k, in_thread_copy_step);
// indexOffset += K_BlockTileSize;
reducedTiles++;
} while(reducedTiles < toReduceTiles);
constexpr auto reduced_data_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{}));
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
if(thread_k_cluster_id == 0)
{
                // for indexed operations, acc_elementwise_op should do nothing
acc_elementwise_op(accu_value_buf(I), accu_value_buf(I));
accu_value_buf(I) *= alpha;
}
});
if(thread_k_cluster_id == 0)
{
if constexpr(!BetaIsZero)
{
if(!float_equal_zero{}(beta))
{
StaticBuffer<AddressSpaceEnum_t::Vgpr, OutDataType, MThreadSliceSize, true>
priorDstValueBuf;
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<OutDataType,
OutDataType,
OutGridDesc_M,
decltype(reduced_data_desc),
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
1,
true>(
out_grid_desc_m,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize));
threadwise_dst_load.Run(out_grid_desc_m,
out_global_val_buf,
reduced_data_desc,
make_tuple(I0),
priorDstValueBuf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
accu_value_buf(I) += type_convert<AccDataType>(priorDstValueBuf[I] * beta);
});
};
};
auto threadwise_dst_val_store =
ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
OutDataType,
decltype(reduced_data_desc),
OutGridDesc_M,
PassThroughOp<AccDataType>,
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
InMemoryDataOperationEnum_t::Set,
1,
true>(
out_grid_desc_m,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize),
PassThroughOp<AccDataType>{});
auto threadwise_dst_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<IndexDataType,
IndexDataType,
decltype(reduced_data_desc),
OutGridDesc_M,
PassThroughOp<IndexDataType>,
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
InMemoryDataOperationEnum_t::Set,
1,
true>(
out_grid_desc_m,
make_multi_index(block_global_1d_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize),
PassThroughOp<index_t>{});
threadwise_dst_val_store.Run(reduced_data_desc,
make_tuple(I0),
accu_value_buf,
out_grid_desc_m,
out_global_val_buf);
threadwise_dst_idx_store.Run(reduced_data_desc,
make_tuple(I0),
accu_index_buf,
out_grid_desc_m,
out_global_idx_buf);
}
};
};
} // namespace ck
#endif
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_GRIDWISE_2D_REDUCTION_MULTIBLOCK_ATOMIC_ADD_HPP
#define CK_GRIDWISE_2D_REDUCTION_MULTIBLOCK_ATOMIC_ADD_HPP
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_accumulate.hpp"
#include "reduction_functions_blockwise.hpp"
#include "threadwise_tensor_slice_transfer.hpp"
namespace ck {
template <typename GridwiseReduction,
typename InDataType,
typename OutDataType,
typename AccDataType,
typename InGridDesc_M_K,
typename OutGridDesc_M,
typename InElementwiseOperation,
typename AccElementwiseOperation>
__global__ void
kernel_reduce_multiblock_atomic_add(const InGridDesc_M_K in_grid_desc_m_k,
const OutGridDesc_M out_grid_desc_m,
const InElementwiseOperation in_elementwise_op,
const AccElementwiseOperation acc_elementwise_op,
index_t block_group_size,
index_t num_k_block_tile_iteration,
AccDataType alpha,
const InDataType* const __restrict__ p_in_global,
OutDataType* const __restrict__ p_out_global)
{
GridwiseReduction::Run(in_grid_desc_m_k,
out_grid_desc_m,
in_elementwise_op,
acc_elementwise_op,
block_group_size,
num_k_block_tile_iteration,
alpha,
p_in_global,
p_out_global);
};
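// Grid-sizing sketch (illustrative, not the device-layer code): the device layer is assumed to
// choose block_group_size and num_k_block_tile_iteration so that
// block_group_size * num_k_block_tile_iteration * K_BlockTileSize covers the K extent, and to
// launch one group of block_group_size blocks per M tile. The function name is hypothetical.
inline int example_multiblock_grid_size(int M, int M_BlockTileSize, int block_group_size)
{
    const int num_m_block_groups = (M + M_BlockTileSize - 1) / M_BlockTileSize;
    return num_m_block_groups * block_group_size; // total number of thread blocks
}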
template <typename InDataType,
typename OutDataType,
typename AccDataType,
typename InGridDesc_M_K,
typename OutGridDesc_M,
typename ReduceOperation,
typename InElementwiseOperation,
typename AccElementwiseOperation,
bool PropagateNan,
index_t BlockSize,
index_t MThreadClusterSize,
index_t KThreadClusterSize,
index_t MThreadSliceSize,
index_t KThreadSliceSize,
index_t InSrcVectorDim,
index_t InSrcVectorSize,
index_t OutDstVectorSize>
struct GridwiseReduction_mk_to_m_multiblock_atomic_add
{
static constexpr bool reorder_thread_cluster = (InSrcVectorDim == 0);
static constexpr auto buffer_1d_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<BlockSize>{}));
using blockwise_reduce = PartitionedBlockwiseReductionOn1dBuffer<decltype(buffer_1d_desc),
AccDataType,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
reorder_thread_cluster,
ReduceOperation,
PropagateNan>;
template <typename T>
using PassThroughOp = tensor_operation::element_wise::UnaryIdentic<T, T>;
static constexpr auto I0 = Number<0>{};
static constexpr index_t M_BlockTileSize = MThreadClusterSize * MThreadSliceSize;
static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;
using Accumulation = detail::AccumulateWithNanCheck<PropagateNan, ReduceOperation, AccDataType>;
__device__ static void Run(const InGridDesc_M_K& in_grid_desc_m_k,
const OutGridDesc_M& out_grid_desc_m,
const InElementwiseOperation& in_elementwise_op,
const AccElementwiseOperation& acc_elementwise_op,
index_t block_group_size,
index_t num_k_block_tile_iteration,
AccDataType alpha,
const InDataType* const __restrict__ p_in_global,
OutDataType* const __restrict__ p_out_global)
{
const auto zeroVal = ReduceOperation::GetReductionZeroVal();
// LDS
__shared__ AccDataType p_block_reduce_buffer[BlockSize];
const auto in_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_in_global, in_grid_desc_m_k.GetElementSpaceSize(), type_convert<InDataType>(zeroVal));
auto out_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_out_global, out_grid_desc_m.GetElementSpaceSize());
auto block_reduce_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_block_reduce_buffer, BlockSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr,
AccDataType,
MThreadSliceSize * KThreadSliceSize,
true>
in_thread_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, AccDataType, MThreadSliceSize, true> accu_value_buf;
static_for<0, MThreadSliceSize, 1>{}([&](auto I) { accu_value_buf(I) = zeroVal; });
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_id = get_block_1d_id();
const index_t blkgroup_id = block_global_id / block_group_size;
const index_t block_local_id = block_global_id % block_group_size;
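        // Example (illustrative): with block_group_size = 4, blocks 0..3 form block group 0 and
        // all work on the same M tile (blkgroup_id = 0) while covering disjoint K ranges
        // (block_local_id = 0..3); their partial sums are atomically added into the same out[m].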
const index_t thread_m_cluster_id =
reorder_thread_cluster ? thread_local_id % MThreadClusterSize
: ((thread_local_id / KThreadClusterSize) % MThreadClusterSize);
const index_t thread_k_cluster_id =
reorder_thread_cluster ? ((thread_local_id / MThreadClusterSize) % KThreadClusterSize)
: thread_local_id % KThreadClusterSize;
const index_t reduceSizePerBlock = K_BlockTileSize * num_k_block_tile_iteration;
using ThreadBufferLengths = Sequence<MThreadSliceSize, KThreadSliceSize>;
constexpr auto thread_buffer_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<
InDataType,
AccDataType,
InGridDesc_M_K,
decltype(thread_buffer_desc),
ThreadBufferLengths,
typename conditional<InSrcVectorDim == 0, Sequence<1, 0>, Sequence<0, 1>>::type,
InSrcVectorDim,
InSrcVectorSize,
1,
false>(
in_grid_desc_m_k,
make_multi_index(blkgroup_id * M_BlockTileSize + thread_m_cluster_id * MThreadSliceSize,
block_local_id * reduceSizePerBlock +
thread_k_cluster_id * KThreadSliceSize));
constexpr auto in_thread_copy_step = make_multi_index(0, K_BlockTileSize);
index_t reducedTiles = 0;
do
{
threadwise_src_load.Run(in_grid_desc_m_k,
in_global_buf,
thread_buffer_desc,
make_tuple(I0, I0),
in_thread_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
// do element-wise pre-reduction operation
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
in_elementwise_op(in_thread_buf(offset), in_thread_buf(offset));
});
// reduce on each thread-local slice
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
Accumulation::Calculate(accu_value_buf(I), in_thread_buf[offset]);
});
});
threadwise_src_load.MoveSrcSliceWindow(in_grid_desc_m_k, in_thread_copy_step);
reducedTiles++;
} while(reducedTiles < num_k_block_tile_iteration);
constexpr auto reduced_data_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{}));
        // Each block executes multiple parallel reductions on the LDS and atomically adds its
        // reduced output to the global location of the corresponding invariant (M) index, so that
        // all blocks together produce a consistent reduced result for that index. Because vector
        // loads are used, each block/thread covers multiple invariant indices.
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
if constexpr(reorder_thread_cluster)
{
block_reduce_buf(thread_k_cluster_id * MThreadClusterSize + thread_m_cluster_id) =
accu_value_buf[I];
}
else
block_reduce_buf(thread_m_cluster_id * KThreadClusterSize + thread_k_cluster_id) =
accu_value_buf[I];
accu_value_buf(I) = zeroVal;
__syncthreads();
blockwise_reduce::Reduce(
block_reduce_buf, accu_value_buf(I), thread_m_cluster_id, thread_k_cluster_id);
});
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
if(thread_k_cluster_id == 0)
{
acc_elementwise_op(accu_value_buf(I), accu_value_buf(I));
accu_value_buf(I) *= alpha;
}
});
if(thread_k_cluster_id == 0)
{
auto threadwise_dst_store =
ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
OutDataType,
decltype(reduced_data_desc),
OutGridDesc_M,
PassThroughOp<AccDataType>,
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
InMemoryDataOperationEnum_t::AtomicAdd,
1,
true>(
out_grid_desc_m,
make_multi_index(blkgroup_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize),
PassThroughOp<AccDataType>{});
threadwise_dst_store.Run(
reduced_data_desc, make_tuple(I0), accu_value_buf, out_grid_desc_m, out_global_buf);
}
};
};
} // namespace ck
#endif
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_GRIDWISE_2D_REDUCTION_MULTIBLOCK_TWO_CALL_HPP
#define CK_GRIDWISE_2D_REDUCTION_MULTIBLOCK_TWO_CALL_HPP
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_accumulate.hpp"
#include "reduction_functions_blockwise.hpp"
#include "threadwise_tensor_slice_transfer.hpp"
namespace ck {
template <typename GridwiseReduction,
bool NeedIndices,
typename InDataType,
typename AccDataType,
typename IndexDataType,
typename InGridDesc_M_K,
typename WorkspaceDesc_M_K,
typename InElementwiseOperation,
typename AccElementwiseOperation>
__global__ void
kernel_partial_reduce_multiblock(const InGridDesc_M_K in_grid_desc_m_k,
const WorkspaceDesc_M_K workspace_desc_m_k,
const InElementwiseOperation in_elementwise_op,
const AccElementwiseOperation acc_elementwise_op,
index_t block_group_size,
index_t num_k_block_tile_iteration,
const InDataType* const __restrict__ p_src_global,
AccDataType* const __restrict__ p_ws_values_global,
IndexDataType* const __restrict__ p_ws_indices_global)
{
if constexpr(!NeedIndices)
{
GridwiseReduction::Run(in_grid_desc_m_k,
workspace_desc_m_k,
in_elementwise_op,
acc_elementwise_op,
block_group_size,
num_k_block_tile_iteration,
p_src_global,
p_ws_values_global,
p_ws_indices_global);
}
else
{
GridwiseReduction::RunWithIndex(in_grid_desc_m_k,
workspace_desc_m_k,
in_elementwise_op,
acc_elementwise_op,
block_group_size,
num_k_block_tile_iteration,
p_src_global,
p_ws_values_global,
p_ws_indices_global);
};
};
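// Two-call scheme (descriptive note): this kernel performs the first call and writes one partial
// value (and, when NeedIndices is true, one partial index) per block into an
// M x block_group_size workspace; a second, blockwise kernel then reduces the workspace along its
// K dimension to produce the final M outputs.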
template <typename InDataType,
typename AccDataType,
typename IndexDataType,
typename InGridDesc_M_K,
typename WorkspaceDesc_M_K,
typename ReduceOperation,
typename InElementwiseOperation,
typename AccElementwiseOperation,
bool PropagateNan,
index_t BlockSize,
index_t MThreadClusterSize,
index_t KThreadClusterSize,
index_t MThreadSliceSize,
index_t KThreadSliceSize,
index_t InSrcVectorDim,
index_t InSrcVectorSize,
index_t OutDstVectorSize>
struct GridwiseReduction_mk_to_mk_multiblock_partial_reduce
{
static constexpr bool reorder_thread_cluster = (InSrcVectorDim == 0);
static constexpr auto buffer1dDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<BlockSize>{}));
template <typename T>
using PassThroughOp = tensor_operation::element_wise::UnaryIdentic<T, T>;
static constexpr auto I0 = Number<0>{};
static constexpr index_t M_BlockTileSize = MThreadClusterSize * MThreadSliceSize;
static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;
__device__ static void Run(const InGridDesc_M_K& in_grid_desc_m_k,
const WorkspaceDesc_M_K& workspace_desc_m_k,
const InElementwiseOperation& in_elementwise_op,
const AccElementwiseOperation& acc_elementwise_op,
index_t block_group_size,
index_t num_k_block_tile_iteration,
const InDataType* const __restrict__ p_src_global,
AccDataType* const __restrict__ p_ws_values_global,
IndexDataType* const __restrict__ p_ws_indices_global)
{
using BlockwiseReduce = PartitionedBlockwiseReductionOn1dBuffer<decltype(buffer1dDesc),
AccDataType,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
reorder_thread_cluster,
ReduceOperation,
PropagateNan>;
using Accumulation =
detail::AccumulateWithNanCheck<PropagateNan, ReduceOperation, AccDataType>;
(void)p_ws_indices_global;
(void)acc_elementwise_op;
const auto zeroVal = ReduceOperation::GetReductionZeroVal();
// LDS
__shared__ AccDataType p_block_reduce_buffer[BlockSize];
const auto in_global_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Global>(p_src_global,
in_grid_desc_m_k.GetElementSpaceSize(),
type_convert<InDataType>(zeroVal));
auto workspace_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_ws_values_global, workspace_desc_m_k.GetElementSpaceSize());
auto block_reduce_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_block_reduce_buffer, BlockSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr,
AccDataType,
MThreadSliceSize * KThreadSliceSize,
true>
in_thread_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, AccDataType, MThreadSliceSize, true> accu_value_buf;
static_for<0, MThreadSliceSize, 1>{}([&](auto I) { accu_value_buf(I) = zeroVal; });
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_id = get_block_1d_id();
const index_t blkgroup_id = block_global_id / block_group_size;
const index_t block_local_id = block_global_id % block_group_size;
const index_t thread_m_cluster_id =
reorder_thread_cluster ? thread_local_id % MThreadClusterSize
: ((thread_local_id / KThreadClusterSize) % MThreadClusterSize);
const index_t thread_k_cluster_id =
reorder_thread_cluster ? ((thread_local_id / MThreadClusterSize) % KThreadClusterSize)
: thread_local_id % KThreadClusterSize;
const index_t reduceSizePerBlock = K_BlockTileSize * num_k_block_tile_iteration;
using ThreadBufferLengths = Sequence<MThreadSliceSize, KThreadSliceSize>;
constexpr auto thread_buffer_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<
InDataType,
AccDataType,
InGridDesc_M_K,
decltype(thread_buffer_desc),
ThreadBufferLengths,
typename conditional<InSrcVectorDim == 0, Sequence<1, 0>, Sequence<0, 1>>::type,
InSrcVectorDim,
InSrcVectorSize,
1,
false>(
in_grid_desc_m_k,
make_multi_index(blkgroup_id * M_BlockTileSize + thread_m_cluster_id * MThreadSliceSize,
block_local_id * reduceSizePerBlock +
thread_k_cluster_id * KThreadSliceSize));
constexpr auto in_thread_copy_step = make_multi_index(0, K_BlockTileSize);
index_t reducedTiles = 0;
do
{
threadwise_src_load.Run(in_grid_desc_m_k,
in_global_buf,
thread_buffer_desc,
make_tuple(I0, I0),
in_thread_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
// do element-wise pre-reduction operation
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
in_elementwise_op(in_thread_buf(offset), in_thread_buf(offset));
});
// reduce on each thread-local slice
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
Accumulation::Calculate(accu_value_buf(I), in_thread_buf[offset]);
});
});
threadwise_src_load.MoveSrcSliceWindow(in_grid_desc_m_k, in_thread_copy_step);
reducedTiles++;
} while(reducedTiles < num_k_block_tile_iteration);
constexpr auto reduced_data_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<1>{}));
        // Each block executes multiple parallel reductions on the LDS, and because vector loads
        // are used, each block/thread covers multiple invariant (M) indices.
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
if constexpr(reorder_thread_cluster)
{
block_reduce_buf(thread_k_cluster_id * MThreadClusterSize + thread_m_cluster_id) =
accu_value_buf[I];
}
else
block_reduce_buf(thread_m_cluster_id * KThreadClusterSize + thread_k_cluster_id) =
accu_value_buf[I];
accu_value_buf(I) = zeroVal;
__syncthreads();
BlockwiseReduce::Reduce(
block_reduce_buf, accu_value_buf(I), thread_m_cluster_id, thread_k_cluster_id);
});
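        // Each block stores its per-M partial results into workspace column block_local_id, so
        // the workspace forms an M x block_group_size matrix for the second-call kernel to reduce.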
if(thread_k_cluster_id == 0)
{
auto threadwise_workspace_store =
ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
AccDataType,
decltype(reduced_data_desc),
WorkspaceDesc_M_K,
PassThroughOp<AccDataType>,
Sequence<MThreadSliceSize, 1>,
Sequence<0, 1>,
1,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(
workspace_desc_m_k,
make_multi_index(blkgroup_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
block_local_id),
PassThroughOp<AccDataType>{});
threadwise_workspace_store.Run(reduced_data_desc,
make_tuple(I0, I0),
accu_value_buf,
workspace_desc_m_k,
workspace_global_buf);
}
};
__device__ static void RunWithIndex(const InGridDesc_M_K& in_grid_desc_m_k,
const WorkspaceDesc_M_K& workspace_desc_m_k,
const InElementwiseOperation& in_elementwise_op,
const AccElementwiseOperation& acc_elementwise_op,
index_t block_group_size,
index_t num_k_block_tile_iteration,
const InDataType* const __restrict__ p_src_global,
AccDataType* const __restrict__ p_ws_values_global,
IndexDataType* const __restrict__ p_ws_indices_global)
{
using BlockwiseReduceWithIndex =
PartitionedBlockwiseReductionWithIndexOn1dBuffer<decltype(buffer1dDesc),
AccDataType,
IndexDataType,
BlockSize,
MThreadClusterSize,
KThreadClusterSize,
reorder_thread_cluster,
ReduceOperation,
PropagateNan>;
using AccumulationWithIndex = detail::AccumulateWithIndexAndNanCheck<PropagateNan,
ReduceOperation,
AccDataType,
IndexDataType>;
(void)acc_elementwise_op;
const auto zeroVal = ReduceOperation::GetReductionZeroVal();
// LDS
__shared__ AccDataType p_block_reduce_val_buffer[BlockSize];
__shared__ index_t p_block_reduce_idx_buffer[BlockSize];
const auto in_global_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Global>(p_src_global,
in_grid_desc_m_k.GetElementSpaceSize(),
type_convert<InDataType>(zeroVal));
auto workspace_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_ws_values_global, workspace_desc_m_k.GetElementSpaceSize());
auto workspace_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_ws_indices_global, workspace_desc_m_k.GetElementSpaceSize());
auto block_reduce_val_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_block_reduce_val_buffer, BlockSize);
auto block_reduce_idx_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_block_reduce_idx_buffer, BlockSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr,
AccDataType,
MThreadSliceSize * KThreadSliceSize,
true>
in_thread_val_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr,
IndexDataType,
MThreadSliceSize * KThreadSliceSize,
true>
in_thread_idx_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, AccDataType, MThreadSliceSize, true> accu_value_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, IndexDataType, MThreadSliceSize, true>
accu_index_buf;
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_id = get_block_1d_id();
const index_t blkgroup_id = block_global_id / block_group_size;
const index_t block_local_id = block_global_id % block_group_size;
const index_t thread_m_cluster_id =
reorder_thread_cluster ? thread_local_id % MThreadClusterSize
: ((thread_local_id / KThreadClusterSize) % MThreadClusterSize);
const index_t thread_k_cluster_id =
reorder_thread_cluster ? ((thread_local_id / MThreadClusterSize) % KThreadClusterSize)
: thread_local_id % KThreadClusterSize;
const index_t reduceSizePerBlock = K_BlockTileSize * num_k_block_tile_iteration;
using ThreadBufferLengths = Sequence<MThreadSliceSize, KThreadSliceSize>;
constexpr auto thread_buffer_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<
InDataType,
AccDataType,
InGridDesc_M_K,
decltype(thread_buffer_desc),
ThreadBufferLengths,
typename conditional<InSrcVectorDim == 0, Sequence<1, 0>, Sequence<0, 1>>::type,
InSrcVectorDim,
InSrcVectorSize,
1,
false>(
in_grid_desc_m_k,
make_multi_index(blkgroup_id * M_BlockTileSize + thread_m_cluster_id * MThreadSliceSize,
block_local_id * reduceSizePerBlock +
thread_k_cluster_id * KThreadSliceSize));
constexpr auto in_thread_copy_step = make_multi_index(0, K_BlockTileSize);
index_t indexOffset = block_local_id * reduceSizePerBlock;
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
accu_value_buf(I) = zeroVal;
accu_index_buf(I) = 0;
});
index_t reducedTiles = 0;
do
{
// load the thread slice
threadwise_src_load.Run(in_grid_desc_m_k,
in_global_buf,
thread_buffer_desc,
make_tuple(I0, I0),
in_thread_val_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
// initialize the indices for the per-thread to-reduce values
in_thread_idx_buf(offset) =
indexOffset + thread_k_cluster_id * KThreadSliceSize + J();
// do element-wise pre-reduction operation
in_elementwise_op(in_thread_val_buf(offset), in_thread_val_buf(offset));
});
AccDataType tmpValue = zeroVal;
IndexDataType tmpIndex = 0;
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
// reduce on the dim1 thread slice
AccumulationWithIndex::Calculate(
tmpValue, in_thread_val_buf[offset], tmpIndex, in_thread_idx_buf[offset]);
});
// store thread local value to LDS for parallel reduction
if constexpr(reorder_thread_cluster)
{
block_reduce_val_buf(thread_k_cluster_id * MThreadClusterSize +
thread_m_cluster_id) = tmpValue;
block_reduce_idx_buf(thread_k_cluster_id * MThreadClusterSize +
thread_m_cluster_id) = tmpIndex;
}
else
{
block_reduce_val_buf(thread_m_cluster_id * KThreadClusterSize +
thread_k_cluster_id) = tmpValue;
block_reduce_idx_buf(thread_m_cluster_id * KThreadClusterSize +
thread_k_cluster_id) = tmpIndex;
}
__syncthreads();
BlockwiseReduceWithIndex::Reduce(block_reduce_val_buf,
block_reduce_idx_buf,
tmpValue,
tmpIndex,
thread_m_cluster_id,
thread_k_cluster_id);
AccumulationWithIndex::Calculate(
accu_value_buf(I), tmpValue, accu_index_buf(I), tmpIndex);
});
threadwise_src_load.MoveSrcSliceWindow(in_grid_desc_m_k, in_thread_copy_step);
indexOffset += K_BlockTileSize;
reducedTiles++;
} while(reducedTiles < num_k_block_tile_iteration);
constexpr auto reduced_data_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<1>{}));
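// Only the threads with k-cluster id 0 hold the final per-row partial result of this
// block; they store the partial values and indices to the global workspace so that the
// second-call kernel can finish the reduction across the block groups.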
if(thread_k_cluster_id == 0)
{
auto threadwise_workspace_val_store =
ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
AccDataType,
decltype(reduced_data_desc),
WorkspaceDesc_M_K,
PassThroughOp<AccDataType>,
Sequence<MThreadSliceSize, 1>,
Sequence<0, 1>,
1,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(
workspace_desc_m_k,
make_multi_index(blkgroup_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
block_local_id),
PassThroughOp<AccDataType>{});
auto threadwise_workspace_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<IndexDataType,
IndexDataType,
decltype(reduced_data_desc),
WorkspaceDesc_M_K,
PassThroughOp<IndexDataType>,
Sequence<MThreadSliceSize, 1>,
Sequence<0, 1>,
1,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(
workspace_desc_m_k,
make_multi_index(blkgroup_id * M_BlockTileSize +
thread_m_cluster_id * MThreadSliceSize,
block_local_id),
PassThroughOp<IndexDataType>{});
threadwise_workspace_val_store.Run(reduced_data_desc,
make_tuple(I0, I0),
accu_value_buf,
workspace_desc_m_k,
workspace_global_val_buf);
threadwise_workspace_idx_store.Run(reduced_data_desc,
make_tuple(I0, I0),
accu_index_buf,
workspace_desc_m_k,
workspace_global_idx_buf);
}
};
};
} // namespace ck
#endif
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_GRIDWISE_2D_REDUCTION_THREADWISE_HPP
#define CK_GRIDWISE_2D_REDUCTION_THREADWISE_HPP
#include "data_type.hpp"
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_accumulate.hpp"
#include "threadwise_tensor_slice_transfer.hpp"
namespace ck {
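// Thin kernel wrapper for the thread-wise reduction: depending on NeedIndices it
// dispatches at compile time to GridwiseReduction::Run() (values only) or
// GridwiseReduction::RunWithIndices() (values plus result indices).
//
// A minimal launch sketch (illustrative only; the real dispatch goes through the
// DeviceReduce layer; M, grid_size and stream below are assumptions, and the kernel
// template arguments are omitted for brevity). Each block covers
// BlockSize * MThreadSliceSize rows of the invariant dimension:
//
//   const index_t grid_size =
//       (M + BlockSize * MThreadSliceSize - 1) / (BlockSize * MThreadSliceSize);
//   hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_reduce_threadwise</* specialization */>),
//                      dim3(grid_size), dim3(BlockSize), 0, stream,
//                      in_grid_desc_m_k, out_grid_desc_m, in_elementwise_op,
//                      acc_elementwise_op, alpha, p_in_global, beta, p_out_global,
//                      p_indices_global);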
template <typename GridwiseReduction,
bool NeedIndices,
typename InDataType,
typename OutDataType,
typename AccDataType,
typename IndexDataType,
typename InGridDesc_M_K,
typename OutGridDesc_M,
typename InElementwiseOperation,
typename AccElementwiseOperation>
__global__ void kernel_reduce_threadwise(const InGridDesc_M_K in_grid_desc_m_k,
const OutGridDesc_M out_grid_desc_m,
const InElementwiseOperation in_elementwise_op,
const AccElementwiseOperation acc_elementwise_op,
AccDataType alpha,
const InDataType* const __restrict__ p_in_global,
OutDataType beta,
OutDataType* const __restrict__ p_out_global,
IndexDataType* const __restrict__ p_indices_global)
{
if constexpr(!NeedIndices)
{
GridwiseReduction::Run(in_grid_desc_m_k,
out_grid_desc_m,
in_elementwise_op,
acc_elementwise_op,
alpha,
p_in_global,
beta,
p_out_global,
p_indices_global);
}
else
{
GridwiseReduction::RunWithIndices(in_grid_desc_m_k,
out_grid_desc_m,
in_elementwise_op,
acc_elementwise_op,
alpha,
p_in_global,
beta,
p_out_global,
p_indices_global);
};
};
template <typename InDataType,
typename OutDataType,
typename AccDataType,
typename IndexDataType,
typename InGridDesc_M_K,
typename OutGridDesc_M,
typename ReduceOperation,
typename InElementwiseOperation,
typename AccElementwiseOperation,
bool PropagateNan,
bool BetaIsZero,
index_t BlockSize,
index_t MThreadClusterSize,
index_t KThreadClusterSize,
index_t MThreadSliceSize,
index_t KThreadSliceSize,
index_t InSrcVectorDim,
index_t InSrcVectorSize,
index_t OutDstVectorSize>
struct GridwiseReduction_mk_to_m_threadwise
{
template <typename T>
using PassThroughOp = tensor_operation::element_wise::UnaryIdentic<T, T>;
static constexpr auto I0 = Number<0>{};
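// Run(): every thread independently reduces MThreadSliceSize rows along the K
// dimension, KThreadSliceSize elements per iteration, so no LDS and no inter-thread
// synchronization are needed.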
__device__ static void Run(const InGridDesc_M_K& in_grid_desc_m_k,
const OutGridDesc_M& out_grid_desc_m,
const InElementwiseOperation& in_elementwise_op,
const AccElementwiseOperation& acc_elementwise_op,
AccDataType alpha,
const InDataType* const __restrict__ p_in_global,
OutDataType beta,
OutDataType* const __restrict__ p_out_global,
IndexDataType* const __restrict__ p_indices_global)
{
using Accumulation =
detail::AccumulateWithNanCheck<PropagateNan, ReduceOperation, AccDataType>;
(void)p_indices_global;
const auto zeroVal = ReduceOperation::GetReductionZeroVal();
const auto in_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_in_global, in_grid_desc_m_k.GetElementSpaceSize(), type_convert<InDataType>(zeroVal));
auto dst_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_out_global, out_grid_desc_m.GetElementSpaceSize());
StaticBuffer<AddressSpaceEnum_t::Vgpr,
AccDataType,
MThreadSliceSize * KThreadSliceSize,
true>
in_thread_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, AccDataType, MThreadSliceSize, true> accu_value_buf;
static_for<0, MThreadSliceSize, 1>{}([&](auto I) { accu_value_buf(I) = zeroVal; });
const auto toReduceLength = in_grid_desc_m_k.GetLength(Number<1>{});
using ThreadBufferLengths = Sequence<MThreadSliceSize, KThreadSliceSize>;
constexpr auto thread_buffer_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));
index_t thread_global_1d_id = get_block_1d_id() * BlockSize + get_thread_local_1d_id();
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<
InDataType,
AccDataType,
InGridDesc_M_K,
decltype(thread_buffer_desc),
ThreadBufferLengths,
typename conditional<InSrcVectorDim == 0, Sequence<1, 0>, Sequence<0, 1>>::type,
InSrcVectorDim,
InSrcVectorSize,
1,
false>(in_grid_desc_m_k, make_multi_index(thread_global_1d_id * MThreadSliceSize, 0));
constexpr auto in_thread_copy_step = make_multi_index(0, KThreadSliceSize);
index_t reducedLength = 0;
do
{
threadwise_src_load.Run(in_grid_desc_m_k,
in_global_buf,
thread_buffer_desc,
make_tuple(I0, I0),
in_thread_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
// do element-wise pre-reduction operation
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
in_elementwise_op(in_thread_buf(offset), in_thread_buf(offset));
});
// reduce on each thread-local slice
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
Accumulation::Calculate(accu_value_buf(I), in_thread_buf[offset]);
});
});
threadwise_src_load.MoveSrcSliceWindow(in_grid_desc_m_k, in_thread_copy_step);
reducedLength += KThreadSliceSize;
} while(reducedLength < toReduceLength);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
acc_elementwise_op(accu_value_buf(I), accu_value_buf(I));
accu_value_buf(I) *= alpha;
});
constexpr auto reduced_data_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{}));
if constexpr(!BetaIsZero)
{
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<OutDataType,
OutDataType,
OutGridDesc_M,
decltype(reduced_data_desc),
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
1,
1,
true>(
out_grid_desc_m, make_multi_index(thread_global_1d_id * MThreadSliceSize));
StaticBuffer<AddressSpaceEnum_t::Vgpr, OutDataType, MThreadSliceSize, true>
priorDstValue_buf;
threadwise_dst_load.Run(out_grid_desc_m,
dst_global_buf,
reduced_data_desc,
make_tuple(I0),
priorDstValue_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
accu_value_buf(I) += type_convert<AccDataType>(priorDstValue_buf[I] * beta);
});
};
};
auto threadwise_dst_store =
ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
OutDataType,
decltype(reduced_data_desc),
OutGridDesc_M,
PassThroughOp<AccDataType>,
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
InMemoryDataOperationEnum_t::Set,
1,
false>(
out_grid_desc_m,
make_multi_index(thread_global_1d_id * MThreadSliceSize),
PassThroughOp<AccDataType>{});
threadwise_dst_store.Run(
reduced_data_desc, make_tuple(I0), accu_value_buf, out_grid_desc_m, dst_global_buf);
};
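// RunWithIndices(): same per-thread reduction as Run(), but the index of the winning
// element of each row is tracked along with its value and written to p_indices_global.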
__device__ static void RunWithIndices(const InGridDesc_M_K& in_grid_desc_m_k,
const OutGridDesc_M& out_grid_desc_m,
const InElementwiseOperation& in_elementwise_op,
const AccElementwiseOperation& acc_elementwise_op,
AccDataType alpha,
const InDataType* const __restrict__ p_in_global,
OutDataType beta,
OutDataType* const __restrict__ p_out_global,
IndexDataType* const __restrict__ p_indices_global)
{
using AccumulationWithIndex = detail::AccumulateWithIndexAndNanCheck<PropagateNan,
ReduceOperation,
AccDataType,
IndexDataType>;
(void)acc_elementwise_op;
const auto zeroVal = ReduceOperation::GetReductionZeroVal();
const auto in_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_in_global, in_grid_desc_m_k.GetElementSpaceSize(), type_convert<InDataType>(zeroVal));
auto out_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_out_global, out_grid_desc_m.GetElementSpaceSize());
auto out_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_indices_global, out_grid_desc_m.GetElementSpaceSize());
StaticBuffer<AddressSpaceEnum_t::Vgpr,
AccDataType,
MThreadSliceSize * KThreadSliceSize,
true>
in_thread_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, AccDataType, MThreadSliceSize, true> accu_value_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, IndexDataType, MThreadSliceSize, true>
accu_index_buf;
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
accu_value_buf(I) = zeroVal;
accu_index_buf(I) = 0;
});
const auto toReduceLength = in_grid_desc_m_k.GetLength(Number<1>{});
using ThreadBufferLengths = Sequence<MThreadSliceSize, KThreadSliceSize>;
constexpr auto thread_buffer_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<MThreadSliceSize>{}, Number<KThreadSliceSize>{}));
index_t thread_global_1d_id = get_block_1d_id() * BlockSize + get_thread_local_1d_id();
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<
InDataType,
AccDataType,
InGridDesc_M_K,
decltype(thread_buffer_desc),
ThreadBufferLengths,
typename conditional<InSrcVectorDim == 0, Sequence<1, 0>, Sequence<0, 1>>::type,
InSrcVectorDim,
InSrcVectorSize,
1,
false>(in_grid_desc_m_k, make_multi_index(thread_global_1d_id * MThreadSliceSize, 0));
constexpr auto in_thread_copy_step = make_multi_index(0, KThreadSliceSize);
index_t indexStart = 0;
index_t reducedLength = 0;
do
{
threadwise_src_load.Run(in_grid_desc_m_k,
in_global_buf,
thread_buffer_desc,
make_tuple(I0, I0),
in_thread_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
// do element-wise pre-reduction operation
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
in_elementwise_op(in_thread_buf(offset), in_thread_buf(offset));
});
// reduce on each thread-local slice
static_for<0, KThreadSliceSize, 1>{}([&](auto J) {
constexpr auto offset = I * Number<KThreadSliceSize>{} + J;
AccumulationWithIndex::Calculate(accu_value_buf(I),
in_thread_buf[offset],
accu_index_buf(I),
indexStart + J);
});
});
threadwise_src_load.MoveSrcSliceWindow(in_grid_desc_m_k, in_thread_copy_step);
indexStart += KThreadSliceSize;
reducedLength += KThreadSliceSize;
} while(reducedLength < toReduceLength);
// for the indexed operation, acc_elementwise_op should do nothing
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
acc_elementwise_op(accu_value_buf(I), accu_value_buf(I));
accu_value_buf(I) *= alpha;
});
constexpr auto reduced_data_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<MThreadSliceSize>{}));
if constexpr(!BetaIsZero)
{
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<OutDataType,
OutDataType,
OutGridDesc_M,
decltype(reduced_data_desc),
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
1,
1,
false>(
out_grid_desc_m, make_multi_index(thread_global_1d_id * MThreadSliceSize));
StaticBuffer<AddressSpaceEnum_t::Vgpr, OutDataType, MThreadSliceSize, true>
priorDstValue_buf;
threadwise_dst_load.Run(out_grid_desc_m,
out_global_val_buf,
reduced_data_desc,
make_tuple(I0),
priorDstValue_buf);
static_for<0, MThreadSliceSize, 1>{}([&](auto I) {
accu_value_buf(I) += type_convert<AccDataType>(priorDstValue_buf[I] * beta);
});
};
};
auto threadwise_dst_val_store =
ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
OutDataType,
decltype(reduced_data_desc),
OutGridDesc_M,
PassThroughOp<AccDataType>,
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
InMemoryDataOperationEnum_t::Set,
1,
false>(
out_grid_desc_m,
make_multi_index(thread_global_1d_id * MThreadSliceSize),
PassThroughOp<AccDataType>{});
auto threadwise_dst_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<IndexDataType,
IndexDataType,
decltype(reduced_data_desc),
OutGridDesc_M,
PassThroughOp<IndexDataType>,
Sequence<MThreadSliceSize>,
Sequence<0>,
0,
OutDstVectorSize,
InMemoryDataOperationEnum_t::Set,
1,
false>(
out_grid_desc_m,
make_multi_index(thread_global_1d_id * MThreadSliceSize),
PassThroughOp<IndexDataType>{});
threadwise_dst_val_store.Run(
reduced_data_desc, make_tuple(I0), accu_value_buf, out_grid_desc_m, out_global_val_buf);
threadwise_dst_idx_store.Run(
reduced_data_desc, make_tuple(I0), accu_index_buf, out_grid_desc_m, out_global_idx_buf);
};
};
} // namespace ck
#endif
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_GRIDWISE_GENERIC_2D_REDUCTION_BLOCKWISE_HPP
#define CK_GRIDWISE_GENERIC_2D_REDUCTION_BLOCKWISE_HPP
#include "data_type.hpp"
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_blockwise.hpp"
#include "blockwise_tensor_slice_transfer.hpp"
namespace ck {
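// Block-wise generic 2D reduction: each block reduces one row of the (invariant,
// to-reduce) 2D view. Run<1> produces values only, Run<2> also generates the result
// indices, and Run<3> consumes the per-block workspace values/indices produced by a
// previous multi-block pass.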
template <index_t BlockSize,
typename srcDataType,
typename dstDataType,
typename compType,
typename src2dDescType,
typename dst1dDescType,
ReduceTensorOp_t op,
NanPropagation_t nanPropaOpt,
ReduceTensorIndices_t reduceIndicesOpt,
bool isFirstCall,
bool isLastCall,
index_t GredAccessesPerThreadInBlock>
struct GridwiseReduction_xy_to_x_blockwise
{
using opReduce = typename reduce_binary_operator<compType, op>::opType;
using preUnaryOpType =
typename reduce_unary_operator<compType, op, isFirstCall, isLastCall>::preUnaryOp;
using posUnaryOpType =
typename reduce_unary_operator<compType, op, isFirstCall, isLastCall>::posUnaryOp;
static constexpr auto buffer2dDesc = make_naive_tensor_descriptor_packed(
make_tuple(Number<GredAccessesPerThreadInBlock>{}, Number<BlockSize>{}));
using blockwise_reduce =
BlockwiseReduction_2d_block_buffer<decltype(buffer2dDesc), true, opReduce, nanPropaOpt>;
static constexpr index_t BlockBufferSize = buffer2dDesc.GetElementSize();
static constexpr auto I0 = Number<0>{};
template <int RunId>
__device__ static void Run(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global);
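// Run<1>: reduce values only; indices are neither read nor written.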
template <>
__device__ static void Run<1>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global)
{
(void)ws_indices_global;
(void)indices_global;
// LDS
__shared__ compType p_in_block_buffer[BlockBufferSize];
const auto zeroVal = opReduce::GetReductionZeroVal();
const auto src_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_src_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
auto dst_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_dst_global, dst1dDesc.GetElementSpaceSize());
auto in_block_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_in_block_buffer, BlockBufferSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
accuValue_buf(I0) = zeroVal;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
const int divider = origReduceLen;
const preUnaryOpType preUnaryOp(divider);
const posUnaryOpType posUnaryOp(divider);
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_1d_id = get_block_1d_id();
constexpr auto in_block_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}, Number<BlockBufferSize>{}));
using ThreadSliceLengths = Sequence<1, GredAccessesPerThreadInBlock>;
using ThreadClusterLengths = Sequence<1, BlockSize>;
auto blockwise_src_load =
BlockwiseTensorSliceTransfer_v4<BlockSize,
InMemoryDataOperationEnum_t::Set,
Sequence<1, BlockBufferSize>,
ThreadSliceLengths,
ThreadClusterLengths,
Sequence<0, 1>,
srcDataType,
compType,
src2dDescType,
decltype(in_block_desc),
Sequence<0, 1>,
Sequence<0, 1>,
1,
1,
1,
1,
1,
1,
false,
true>(src2dDesc,
make_multi_index(block_global_1d_id, 0),
in_block_desc,
make_multi_index(0, 0));
constexpr auto in_block_copy_step = make_multi_index(0, BlockBufferSize);
const index_t toReduceBlocks = (toReduceLength + BlockSize - 1) / BlockSize;
for(index_t reducedBlocks = 0; reducedBlocks < toReduceBlocks;
reducedBlocks += GredAccessesPerThreadInBlock)
{
blockwise_src_load.RunRead(src2dDesc, src_global_buf);
blockwise_src_load.RunWrite(in_block_desc, in_block_buf);
__syncthreads();
// do element-wise pre-reduction operation
blockwise_reduce::operate_on_elements(preUnaryOp, in_block_buf);
index_t BlocksInOneOp = (reducedBlocks < toReduceBlocks - GredAccessesPerThreadInBlock)
? GredAccessesPerThreadInBlock
: toReduceBlocks - reducedBlocks;
blockwise_reduce::Reduce(in_block_buf, BlocksInOneOp, accuValue_buf(I0));
blockwise_src_load.MoveSrcSliceWindow(src2dDesc, in_block_copy_step);
}
accuValue_buf(I0) = posUnaryOp(accuValue_buf[I0]);
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
// The first thread in the block stores the reduced result to the global location
// representing the block
if(thread_local_id == 0)
{
if(!float_equal_one{}(alpha))
accuValue_buf(I0) *= type_convert<compType>(alpha);
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> dstValue_buf;
dstValue_buf(I0) = type_convert<dstDataType>(accuValue_buf[I0]);
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<dstDataType,
dstDataType,
dst1dDescType,
decltype(ReducedDataDesc),
Sequence<1>,
Sequence<0>,
0,
1,
1,
false>(dst1dDesc,
make_multi_index(block_global_1d_id));
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> priorDstValue_buf;
threadwise_dst_load.Run(
dst1dDesc, dst_global_buf, ReducedDataDesc, make_tuple(I0), priorDstValue_buf);
dstValue_buf(I0) += priorDstValue_buf[I0] * beta;
}
auto threadwise_dst_store =
ThreadwiseTensorSliceTransfer_v1r3<dstDataType,
dstDataType,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
false>(dst1dDesc,
make_multi_index(block_global_1d_id));
threadwise_dst_store.Run(
ReducedDataDesc, make_tuple(I0), dstValue_buf, dst1dDesc, dst_global_buf);
}
};
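// Run<2>: reduce values and also generate the index of the reduced element for each row.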
template <>
__device__ static void Run<2>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global)
{
(void)ws_indices_global;
// LDS
__shared__ compType p_in_block_buffer[BlockBufferSize];
__shared__ int block_indices_buffer[BlockBufferSize];
const auto zeroVal = opReduce::GetReductionZeroVal();
const auto src_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_src_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
auto dst_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_dst_global, dst1dDesc.GetElementSpaceSize());
auto dst_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
indices_global, dst1dDesc.GetElementSpaceSize());
auto in_block_val_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_in_block_buffer, BlockBufferSize);
auto in_block_idx_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(block_indices_buffer, BlockBufferSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, int, 1, true> accuIndex_buf;
accuValue_buf(I0) = zeroVal;
accuIndex_buf(I0) = 0;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
const int divider = origReduceLen;
const preUnaryOpType preUnaryOp(divider);
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_1d_id = get_block_1d_id();
constexpr auto in_block_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}, Number<BlockBufferSize>{}));
using ThreadSliceLengths = Sequence<1, GredAccessesPerThreadInBlock>;
using ThreadClusterLengths = Sequence<1, BlockSize>;
auto blockwise_src_load =
BlockwiseTensorSliceTransfer_v4<BlockSize,
InMemoryDataOperationEnum_t::Set,
Sequence<1, BlockBufferSize>,
ThreadSliceLengths,
ThreadClusterLengths,
Sequence<0, 1>,
srcDataType,
compType,
src2dDescType,
decltype(in_block_desc),
Sequence<0, 1>,
Sequence<0, 1>,
1,
1,
1,
1,
1,
1,
false,
true>(src2dDesc,
make_multi_index(block_global_1d_id, 0),
in_block_desc,
make_multi_index(0, 0));
constexpr auto in_block_copy_step = make_multi_index(0, BlockBufferSize);
const index_t toReduceBlocks = (toReduceLength + BlockSize - 1) / BlockSize;
int indexOffset = 0;
for(index_t reducedBlocks = 0; reducedBlocks < toReduceBlocks;
reducedBlocks += GredAccessesPerThreadInBlock)
{
// load block data from global memory to LDS; double buffering is not used yet (to be improved)
blockwise_src_load.RunRead(src2dDesc, src_global_buf);
blockwise_src_load.RunWrite(in_block_desc, in_block_val_buf);
__syncthreads();
// construct the indices for the current set of to-be-reduced blocks
blockwise_reduce::init_buffer_indices(in_block_idx_buf, indexOffset);
// apply the unary operation before reducing; it is needed by AMAX, while for MIN/MAX
// it is effectively a no-op
blockwise_reduce::operate_on_elements(preUnaryOp, in_block_val_buf);
index_t BlocksInOneOp = (reducedBlocks < toReduceBlocks - GredAccessesPerThreadInBlock)
? GredAccessesPerThreadInBlock
: toReduceBlocks - reducedBlocks;
blockwise_reduce::Reduce2(in_block_val_buf,
in_block_idx_buf,
BlocksInOneOp,
accuValue_buf(I0),
accuIndex_buf(I0));
indexOffset += BlockBufferSize;
blockwise_src_load.MoveSrcSliceWindow(src2dDesc, in_block_copy_step);
}
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
// The first thread in the block stores the reduced result to the global location
// representing the block
if(thread_local_id == 0)
{
if(!float_equal_one{}(alpha))
accuValue_buf(I0) *= type_convert<compType>(alpha);
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> dstValue_buf;
dstValue_buf(I0) = type_convert<dstDataType>(accuValue_buf[I0]);
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<dstDataType,
dstDataType,
dst1dDescType,
decltype(ReducedDataDesc),
Sequence<1>,
Sequence<0>,
0,
1,
1,
false>(dst1dDesc,
make_multi_index(block_global_1d_id));
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> priorDstValue_buf;
threadwise_dst_load.Run(dst1dDesc,
dst_global_val_buf,
ReducedDataDesc,
make_tuple(I0),
priorDstValue_buf);
dstValue_buf(I0) += priorDstValue_buf[I0] * beta;
}
auto threadwise_dst_val_store =
ThreadwiseTensorSliceTransfer_v1r3<dstDataType,
dstDataType,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
false>(dst1dDesc,
make_multi_index(block_global_1d_id));
auto threadwise_dst_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<int,
int,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
false>(dst1dDesc,
make_multi_index(block_global_1d_id));
threadwise_dst_val_store.Run(
ReducedDataDesc, make_tuple(I0), dstValue_buf, dst1dDesc, dst_global_val_buf);
threadwise_dst_idx_store.Run(
ReducedDataDesc, make_tuple(I0), accuIndex_buf, dst1dDesc, dst_global_idx_buf);
}
};
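// Run<3>: second-call path that reduces pre-computed workspace values together with
// their already-assigned indices.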
template <>
__device__ static void Run<3>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ ws_values_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global)
{
(void)origReduceLen;
// LDS
__shared__ compType p_in_block_buffer[BlockBufferSize];
__shared__ int block_indices_buffer[BlockBufferSize];
const auto zeroVal = opReduce::GetReductionZeroVal();
const auto src_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
ws_values_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
const auto src_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
ws_indices_global, src2dDesc.GetElementSpaceSize());
auto dst_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_dst_global, dst1dDesc.GetElementSpaceSize());
auto dst_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
indices_global, dst1dDesc.GetElementSpaceSize());
auto in_block_val_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_in_block_buffer, BlockBufferSize);
auto in_block_idx_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(block_indices_buffer, BlockBufferSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, int, 1, true> accuIndex_buf;
accuValue_buf(I0) = zeroVal;
accuIndex_buf(I0) = 0;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_1d_id = get_block_1d_id();
constexpr auto in_block_desc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}, Number<BlockBufferSize>{}));
using ThreadSliceLengths = Sequence<1, GredAccessesPerThreadInBlock>;
using ThreadClusterLengths = Sequence<1, BlockSize>;
auto blockwise_src_val_load =
BlockwiseTensorSliceTransfer_v4<BlockSize,
InMemoryDataOperationEnum_t::Set,
Sequence<1, BlockBufferSize>,
ThreadSliceLengths,
ThreadClusterLengths,
Sequence<0, 1>,
srcDataType,
compType,
src2dDescType,
decltype(in_block_desc),
Sequence<0, 1>,
Sequence<0, 1>,
1,
1,
1,
1,
1,
1,
false,
true>(src2dDesc,
make_multi_index(block_global_1d_id, 0),
in_block_desc,
make_multi_index(0, 0));
auto blockwise_src_idx_load =
BlockwiseTensorSliceTransfer_v4<BlockSize,
InMemoryDataOperationEnum_t::Set,
Sequence<1, BlockBufferSize>,
ThreadSliceLengths,
ThreadClusterLengths,
Sequence<0, 1>,
int,
int,
src2dDescType,
decltype(in_block_desc),
Sequence<0, 1>,
Sequence<0, 1>,
1,
1,
1,
1,
1,
1,
false,
true>(src2dDesc,
make_multi_index(block_global_1d_id, 0),
in_block_desc,
make_multi_index(0, 0));
constexpr auto in_block_copy_step = make_multi_index(0, BlockBufferSize);
const index_t toReduceBlocks = (toReduceLength + BlockSize - 1) / BlockSize;
for(index_t reducedBlocks = 0; reducedBlocks < toReduceBlocks;
reducedBlocks += GredAccessesPerThreadInBlock)
{
// load block data from global memory to LDS; double buffering is not used yet (to be improved)
blockwise_src_val_load.RunRead(src2dDesc, src_global_val_buf);
blockwise_src_idx_load.RunRead(src2dDesc, src_global_idx_buf);
blockwise_src_val_load.RunWrite(in_block_desc, in_block_val_buf);
blockwise_src_idx_load.RunWrite(in_block_desc, in_block_idx_buf);
__syncthreads();
index_t BlocksInOneOp = (reducedBlocks < toReduceBlocks - GredAccessesPerThreadInBlock)
? GredAccessesPerThreadInBlock
: toReduceBlocks - reducedBlocks;
blockwise_reduce::Reduce2(in_block_val_buf,
in_block_idx_buf,
BlocksInOneOp,
accuValue_buf(I0),
accuIndex_buf(I0));
blockwise_src_val_load.MoveSrcSliceWindow(src2dDesc, in_block_copy_step);
blockwise_src_idx_load.MoveSrcSliceWindow(src2dDesc, in_block_copy_step);
}
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
// The first thread in the block stores the reduced result to the global location
// representing the block
if(thread_local_id == 0)
{
if(!float_equal_one{}(alpha))
accuValue_buf(I0) *= type_convert<compType>(alpha);
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> dstValue_buf;
dstValue_buf(I0) = type_convert<dstDataType>(accuValue_buf[I0]);
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<dstDataType,
dstDataType,
dst1dDescType,
decltype(ReducedDataDesc),
Sequence<1>,
Sequence<0>,
0,
1,
1,
true>(dst1dDesc,
make_multi_index(block_global_1d_id));
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> priorDstValue_buf;
threadwise_dst_load.Run(dst1dDesc,
dst_global_val_buf,
ReducedDataDesc,
make_tuple(I0),
priorDstValue_buf);
dstValue_buf(I0) += priorDstValue_buf[I0] * beta;
}
auto threadwise_dst_val_store =
ThreadwiseTensorSliceTransfer_v1r3<dstDataType,
dstDataType,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(dst1dDesc,
make_multi_index(block_global_1d_id));
auto threadwise_dst_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<int,
int,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(dst1dDesc,
make_multi_index(block_global_1d_id));
threadwise_dst_val_store.Run(
ReducedDataDesc, make_tuple(I0), dstValue_buf, dst1dDesc, dst_global_val_buf);
threadwise_dst_idx_store.Run(
ReducedDataDesc, make_tuple(I0), accuIndex_buf, dst1dDesc, dst_global_idx_buf);
}
};
};
} // namespace ck
#endif
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_GRIDWISE_GENERIC_2D_REDUCTION_DIRECT_THREADWISE_HPP
#define CK_GRIDWISE_GENERIC_2D_REDUCTION_DIRECT_THREADWISE_HPP
#include "data_type.hpp"
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_threadwise.hpp"
#include "threadwise_tensor_slice_transfer.hpp"
namespace ck {
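// Direct thread-wise generic 2D reduction: each thread reduces one row on its own,
// buffering GredThreadBufferLength elements per iteration. The Run<1>/Run<2>/Run<3>
// variants mirror the block-wise version (values only / with index generation /
// second call on workspace data).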
template <index_t BlockSize,
typename srcDataType,
typename dstDataType,
typename compType,
typename src2dDescType,
typename dst1dDescType,
ReduceTensorOp_t op,
NanPropagation_t nanPropaOpt,
ReduceTensorIndices_t reduceIndicesOpt,
bool isFirstCall,
bool isLastCall,
index_t GredThreadBufferLength>
struct GridwiseReduction_xy_to_x_direct_threadwise
{
using opReduce = typename reduce_binary_operator<compType, op>::opType;
using preUnaryOpType =
typename reduce_unary_operator<compType, op, isFirstCall, isLastCall>::preUnaryOp;
using posUnaryOpType =
typename reduce_unary_operator<compType, op, isFirstCall, isLastCall>::posUnaryOp;
static constexpr auto I0 = Number<0>{};
template <int RunId>
__device__ static void Run(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global);
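// Run<1>: per-thread reduction of values only.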
template <>
__device__ static void Run<1>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global)
{
(void)ws_indices_global;
(void)indices_global;
const auto zeroVal = opReduce::GetReductionZeroVal();
const auto src_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_src_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
auto dst_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_dst_global, dst1dDesc.GetElementSpaceSize());
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, GredThreadBufferLength, true>
in_thread_buf;
using threadwise_reduce = ThreadReduce<decltype(in_thread_buf), opReduce, nanPropaOpt>;
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
accuValue_buf(I0) = zeroVal;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
const int divider = origReduceLen;
const preUnaryOpType preUnaryOp(divider);
const posUnaryOpType posUnaryOp(divider);
using ThreadBufferLengths = Sequence<1, GredThreadBufferLength>;
constexpr auto ThreadBufferDesc = make_naive_tensor_descriptor_packed(
make_tuple(Number<1>{}, Number<GredThreadBufferLength>{}));
index_t thread_global_1d_id = get_block_1d_id() * BlockSize + get_thread_local_1d_id();
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<srcDataType,
compType,
src2dDescType,
decltype(ThreadBufferDesc),
ThreadBufferLengths,
Sequence<0, 1>,
1,
1,
1,
false>(
src2dDesc, make_multi_index(thread_global_1d_id, 0));
constexpr auto in_thread_copy_step = make_multi_index(0, GredThreadBufferLength);
for(index_t reducedLength = 0; reducedLength < toReduceLength;
reducedLength += GredThreadBufferLength)
{
threadwise_src_load.Run(
src2dDesc, src_global_buf, ThreadBufferDesc, make_tuple(I0, I0), in_thread_buf);
// do element-wise pre-reduction operation
threadwise_reduce::operate_on_elements(preUnaryOp, in_thread_buf);
// do the reduction on the Thread Buffer
threadwise_reduce::Reduce(in_thread_buf, accuValue_buf(I0));
threadwise_src_load.MoveSrcSliceWindow(src2dDesc, in_thread_copy_step);
}
accuValue_buf(I0) = posUnaryOp(accuValue_buf[I0]);
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
if(!float_equal_one{}(alpha))
accuValue_buf(I0) *= type_convert<compType>(alpha);
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> dstValue_buf;
dstValue_buf(I0) = type_convert<dstDataType>(accuValue_buf[I0]);
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load = ThreadwiseTensorSliceTransfer_v2<dstDataType,
dstDataType,
dst1dDescType,
decltype(ReducedDataDesc),
Sequence<1>,
Sequence<0>,
0,
1,
1,
true>(
dst1dDesc, make_multi_index(thread_global_1d_id));
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> priorDstValue_buf;
threadwise_dst_load.Run(
dst1dDesc, dst_global_buf, ReducedDataDesc, make_tuple(I0), priorDstValue_buf);
dstValue_buf(I0) += priorDstValue_buf[I0] * beta;
}
auto threadwise_dst_store =
ThreadwiseTensorSliceTransfer_v1r3<dstDataType,
dstDataType,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(dst1dDesc,
make_multi_index(thread_global_1d_id));
threadwise_dst_store.Run(
ReducedDataDesc, make_tuple(I0), dstValue_buf, dst1dDesc, dst_global_buf);
};
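// Run<2>: per-thread reduction that also tracks the index of the reduced element.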
template <>
__device__ static void Run<2>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global)
{
(void)ws_indices_global;
const auto zeroVal = opReduce::GetReductionZeroVal();
const auto src_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_src_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
auto dst_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_dst_global, dst1dDesc.GetElementSpaceSize());
auto dst_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
indices_global, dst1dDesc.GetElementSpaceSize());
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, GredThreadBufferLength, true>
in_thread_buf;
using threadwise_reduce = ThreadReduce<decltype(in_thread_buf), opReduce, nanPropaOpt>;
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, int, 1, true> accuIndex_buf;
accuValue_buf(I0) = zeroVal;
accuIndex_buf(I0) = 0;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
const int divider = origReduceLen;
const preUnaryOpType preUnaryOp(divider);
using ThreadBufferLengths = Sequence<1, GredThreadBufferLength>;
constexpr auto ThreadBufferDesc = make_naive_tensor_descriptor_packed(
make_tuple(Number<1>{}, Number<GredThreadBufferLength>{}));
index_t thread_global_1d_id = get_block_1d_id() * BlockSize + get_thread_local_1d_id();
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<srcDataType,
compType,
src2dDescType,
decltype(ThreadBufferDesc),
ThreadBufferLengths,
Sequence<0, 1>,
1,
1,
1,
false>(
src2dDesc, make_multi_index(thread_global_1d_id, 0));
constexpr auto in_thread_copy_step = make_multi_index(0, GredThreadBufferLength);
index_t indexStart = 0;
for(index_t reducedLength = 0; reducedLength < toReduceLength;
reducedLength += GredThreadBufferLength)
{
threadwise_src_load.Run(
src2dDesc, src_global_buf, ThreadBufferDesc, make_tuple(I0, I0), in_thread_buf);
// apply the unary operation before reducing; it is needed by AMAX, while for MIN/MAX
// it is effectively a no-op
threadwise_reduce::operate_on_elements(preUnaryOp, in_thread_buf);
// do the reduction on the Thread Buffer
threadwise_reduce::Reduce2(
in_thread_buf, accuValue_buf(I0), accuIndex_buf(I0), indexStart);
indexStart += GredThreadBufferLength;
threadwise_src_load.MoveSrcSliceWindow(src2dDesc, in_thread_copy_step);
}
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
if(!float_equal_one{}(alpha))
accuValue_buf(I0) *= type_convert<compType>(alpha);
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> dstValue_buf;
dstValue_buf(I0) = type_convert<dstDataType>(accuValue_buf[I0]);
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load = ThreadwiseTensorSliceTransfer_v2<dstDataType,
dstDataType,
dst1dDescType,
decltype(ReducedDataDesc),
Sequence<1>,
Sequence<0>,
0,
1,
1,
false>(
dst1dDesc, make_multi_index(thread_global_1d_id));
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> priorDstValue_buf;
threadwise_dst_load.Run(
dst1dDesc, dst_global_val_buf, ReducedDataDesc, make_tuple(I0), priorDstValue_buf);
dstValue_buf(I0) += priorDstValue_buf[I0] * beta;
}
auto threadwise_dst_val_store =
ThreadwiseTensorSliceTransfer_v1r3<dstDataType,
dstDataType,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
false>(dst1dDesc,
make_multi_index(thread_global_1d_id));
auto threadwise_dst_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<int,
int,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
false>(dst1dDesc,
make_multi_index(thread_global_1d_id));
threadwise_dst_val_store.Run(
ReducedDataDesc, make_tuple(I0), dstValue_buf, dst1dDesc, dst_global_val_buf);
threadwise_dst_idx_store.Run(
ReducedDataDesc, make_tuple(I0), accuIndex_buf, dst1dDesc, dst_global_idx_buf);
};
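// Run<3>: per-thread second-call reduction over workspace values and their indices.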
template <>
__device__ static void Run<3>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ ws_values_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global)
{
(void)origReduceLen;
const auto zeroVal = opReduce::GetReductionZeroVal();
const auto src_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
ws_values_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
const auto src_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
ws_indices_global, src2dDesc.GetElementSpaceSize());
auto dst_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_dst_global, dst1dDesc.GetElementSpaceSize());
auto dst_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
indices_global, dst1dDesc.GetElementSpaceSize());
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, GredThreadBufferLength, true>
in_thread_val_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, int, GredThreadBufferLength, true> in_thread_idx_buf;
using threadwise_reduce = ThreadReduceWithIndicesInput<decltype(in_thread_val_buf),
decltype(in_thread_idx_buf),
opReduce,
nanPropaOpt>;
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, int, 1, true> accuIndex_buf;
accuValue_buf(I0) = zeroVal;
accuIndex_buf(I0) = 0;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
using ThreadBufferLengths = Sequence<1, GredThreadBufferLength>;
constexpr auto ThreadBufferDesc = make_naive_tensor_descriptor_packed(
make_tuple(Number<1>{}, Number<GredThreadBufferLength>{}));
index_t thread_global_1d_id = get_block_1d_id() * BlockSize + get_thread_local_1d_id();
auto threadwise_src_val_load = ThreadwiseTensorSliceTransfer_v2<srcDataType,
compType,
src2dDescType,
decltype(ThreadBufferDesc),
ThreadBufferLengths,
Sequence<0, 1>,
1,
1,
1,
false>(
src2dDesc, make_multi_index(thread_global_1d_id, 0));
auto threadwise_src_idx_load = ThreadwiseTensorSliceTransfer_v2<int,
int,
src2dDescType,
decltype(ThreadBufferDesc),
ThreadBufferLengths,
Sequence<0, 1>,
1,
1,
1,
false>(
src2dDesc, make_multi_index(thread_global_1d_id, 0));
constexpr auto in_thread_copy_step = make_multi_index(0, GredThreadBufferLength);
for(index_t reducedLength = 0; reducedLength < toReduceLength;
reducedLength += GredThreadBufferLength)
{
threadwise_src_val_load.Run(src2dDesc,
src_global_val_buf,
ThreadBufferDesc,
make_tuple(I0, I0),
in_thread_val_buf);
threadwise_src_idx_load.Run(src2dDesc,
src_global_idx_buf,
ThreadBufferDesc,
make_tuple(I0, I0),
in_thread_idx_buf);
// do the reduction on the Thread Buffer
threadwise_reduce::Reduce(
in_thread_val_buf, in_thread_idx_buf, accuValue_buf(I0), accuIndex_buf(I0));
threadwise_src_val_load.MoveSrcSliceWindow(src2dDesc, in_thread_copy_step);
threadwise_src_idx_load.MoveSrcSliceWindow(src2dDesc, in_thread_copy_step);
}
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
if(!float_equal_one{}(alpha))
accuValue_buf(I0) *= type_convert<compType>(alpha);
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> dstValue_buf;
dstValue_buf(I0) = type_convert<dstDataType>(accuValue_buf[I0]);
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load = ThreadwiseTensorSliceTransfer_v2<dstDataType,
dstDataType,
dst1dDescType,
decltype(ReducedDataDesc),
Sequence<1>,
Sequence<0>,
0,
1,
1,
false>(
dst1dDesc, make_multi_index(thread_global_1d_id));
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> priorDstValue_buf;
threadwise_dst_load.Run(
dst1dDesc, dst_global_val_buf, ReducedDataDesc, make_tuple(I0), priorDstValue_buf);
dstValue_buf(I0) += priorDstValue_buf[I0] * beta;
}
auto threadwise_dst_val_store =
ThreadwiseTensorSliceTransfer_v1r3<dstDataType,
dstDataType,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
false>(dst1dDesc,
make_multi_index(thread_global_1d_id));
auto threadwise_dst_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<int,
int,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
false>(dst1dDesc,
make_multi_index(thread_global_1d_id));
threadwise_dst_val_store.Run(
ReducedDataDesc, make_tuple(I0), dstValue_buf, dst1dDesc, dst_global_val_buf);
threadwise_dst_idx_store.Run(
ReducedDataDesc, make_tuple(I0), accuIndex_buf, dst1dDesc, dst_global_idx_buf);
};
};
} // namespace ck
#endif
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_GRIDWISE_GENERIC_2D_REDUCTION_DIRECT_WARPWISE_HPP
#define CK_GRIDWISE_GENERIC_2D_REDUCTION_DIRECT_WARPWISE_HPP
#include "data_type.hpp"
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_warpwise.hpp"
#include "threadwise_tensor_slice_transfer.hpp"
namespace ck {
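// Direct warp-wise generic 2D reduction: each warp reduces one row cooperatively, with
// every lane buffering GredAccessesPerThreadInWarp elements per iteration. The
// Run<1>/Run<2>/Run<3> variants follow the same pattern as the block-wise and
// thread-wise kernels.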
template <index_t BlockSize,
typename srcDataType,
typename dstDataType,
typename compType,
typename src2dDescType,
typename dst1dDescType,
ReduceTensorOp_t op,
NanPropagation_t nanPropaOpt,
ReduceTensorIndices_t reduceIndicesOpt,
bool isFirstCall,
bool isLastCall,
index_t GredAccessesPerThreadInWarp>
struct GridwiseReduction_xy_to_x_direct_warpwise
{
using opReduce = typename reduce_binary_operator<compType, op>::opType;
using preUnaryOpType =
typename reduce_unary_operator<compType, op, isFirstCall, isLastCall>::preUnaryOp;
using posUnaryOpType =
typename reduce_unary_operator<compType, op, isFirstCall, isLastCall>::posUnaryOp;
static constexpr auto I0 = Number<0>{};
template <int RunId>
__device__ static void Run(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global);
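// Run<1>: warp-cooperative reduction of values only; lane 0 stores the result.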
template <>
__device__ static void Run<1>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global)
{
(void)ws_indices_global;
(void)indices_global;
const auto zeroVal = opReduce::GetReductionZeroVal();
const auto src_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_src_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
auto dst_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_dst_global, dst1dDesc.GetElementSpaceSize());
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, GredAccessesPerThreadInWarp, true>
in_thread_buf;
using warpwise_reduce =
WarpReduce<decltype(in_thread_buf), BlockSize, opReduce, nanPropaOpt>;
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
accuValue_buf(I0) = zeroVal;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
const int divider = origReduceLen;
const preUnaryOpType preUnaryOp(divider);
const posUnaryOpType posUnaryOp(divider);
using ThreadBufferLengths = Sequence<1, GredAccessesPerThreadInWarp>;
constexpr auto ThreadBufferDesc = make_naive_tensor_descriptor_packed(
make_tuple(Number<1>{}, Number<GredAccessesPerThreadInWarp>{}));
index_t thread_global_1d_id = get_block_1d_id() * BlockSize + get_thread_local_1d_id();
index_t warp_global_1d_id = thread_global_1d_id / warpSize;
index_t thread_inwarp_id = thread_global_1d_id % warpSize;
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<srcDataType,
compType,
src2dDescType,
decltype(ThreadBufferDesc),
ThreadBufferLengths,
Sequence<0, 1>,
1,
1,
1,
false>(
src2dDesc,
make_multi_index(warp_global_1d_id, thread_inwarp_id * GredAccessesPerThreadInWarp));
constexpr auto in_thread_copy_step =
make_multi_index(0, warpSize * GredAccessesPerThreadInWarp);
for(index_t reducedLength = 0; reducedLength < toReduceLength;
reducedLength += warpSize * GredAccessesPerThreadInWarp)
{
threadwise_src_load.Run(
src2dDesc, src_global_buf, ThreadBufferDesc, make_tuple(I0, I0), in_thread_buf);
// do element-wise pre-reduction operation
warpwise_reduce::operate_on_elements(preUnaryOp, in_thread_buf);
// do the warp-wise reduction over the thread buffers of all lanes in the warp
warpwise_reduce::Reduce(in_thread_buf, accuValue_buf(I0));
threadwise_src_load.MoveSrcSliceWindow(src2dDesc, in_thread_copy_step);
}
accuValue_buf(I0) = posUnaryOp(accuValue_buf[I0]);
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
// The first thread in the warp stores the reduced result to the global location
// representing the Warp
if(thread_inwarp_id == 0)
{
if(!float_equal_one{}(alpha))
accuValue_buf(I0) *= type_convert<compType>(alpha);
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> dstValue_buf;
dstValue_buf(I0) = type_convert<dstDataType>(accuValue_buf[I0]);
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<dstDataType,
dstDataType,
dst1dDescType,
decltype(ReducedDataDesc),
Sequence<1>,
Sequence<0>,
0,
1,
1,
true>(dst1dDesc,
make_multi_index(warp_global_1d_id));
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> priorDstValue_buf;
threadwise_dst_load.Run(
dst1dDesc, dst_global_buf, ReducedDataDesc, make_tuple(I0), priorDstValue_buf);
dstValue_buf(I0) += priorDstValue_buf(I0) * beta;
}
auto threadwise_dst_store =
ThreadwiseTensorSliceTransfer_v1r3<dstDataType,
dstDataType,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(dst1dDesc,
make_multi_index(warp_global_1d_id));
threadwise_dst_store.Run(
ReducedDataDesc, make_tuple(I0), dstValue_buf, dst1dDesc, dst_global_buf);
}
};
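// Run<2>: warp-cooperative reduction that also produces the result index.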
template <>
__device__ static void Run<2>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global)
{
(void)ws_indices_global;
const auto zeroVal = opReduce::GetReductionZeroVal();
const auto src_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_src_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
auto dst_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_dst_global, dst1dDesc.GetElementSpaceSize());
auto dst_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
indices_global, dst1dDesc.GetElementSpaceSize());
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, GredAccessesPerThreadInWarp, true>
in_thread_buf;
using warpwise_reduce =
WarpReduce<decltype(in_thread_buf), BlockSize, opReduce, nanPropaOpt>;
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, int, 1, true> accuIndex_buf;
accuValue_buf(I0) = zeroVal;
accuIndex_buf(I0) = 0;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
const int divider = origReduceLen;
const preUnaryOpType preUnaryOp(divider);
using ThreadBufferLengths = Sequence<1, GredAccessesPerThreadInWarp>;
constexpr auto ThreadBufferDesc = make_naive_tensor_descriptor_packed(
make_tuple(Number<1>{}, Number<GredAccessesPerThreadInWarp>{}));
index_t thread_global_1d_id = get_block_1d_id() * BlockSize + get_thread_local_1d_id();
index_t warp_global_1d_id = thread_global_1d_id / warpSize;
index_t thread_inwarp_id = thread_global_1d_id % warpSize;
auto threadwise_src_load = ThreadwiseTensorSliceTransfer_v2<srcDataType,
compType,
src2dDescType,
decltype(ThreadBufferDesc),
ThreadBufferLengths,
Sequence<0, 1>,
1,
1,
1,
false>(
src2dDesc,
make_multi_index(warp_global_1d_id, thread_inwarp_id * GredAccessesPerThreadInWarp));
constexpr auto in_thread_copy_step =
make_multi_index(0, warpSize * GredAccessesPerThreadInWarp);
index_t indexOffset = 0;
for(index_t reducedLength = 0; reducedLength < toReduceLength;
reducedLength += warpSize * GredAccessesPerThreadInWarp)
{
threadwise_src_load.Run(
src2dDesc, src_global_buf, ThreadBufferDesc, make_tuple(I0, I0), in_thread_buf);
// apply the unary operation before reducing; it is needed by AMAX, while for MIN/MAX
// it is effectively a no-op
warpwise_reduce::operate_on_elements(preUnaryOp, in_thread_buf);
// do the warp-wise reduction over the thread buffers of all lanes in the warp
warpwise_reduce::Reduce2(
in_thread_buf, accuValue_buf(I0), accuIndex_buf(I0), indexOffset);
indexOffset += warpSize * GredAccessesPerThreadInWarp;
threadwise_src_load.MoveSrcSliceWindow(src2dDesc, in_thread_copy_step);
}
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
        // The first thread in the warp stores the reduced result to the global location
        // representing the warp
if(thread_inwarp_id == 0)
{
if(!float_equal_one{}(alpha))
accuValue_buf(I0) *= type_convert<compType>(alpha);
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> dstValue_buf;
dstValue_buf(I0) = type_convert<dstDataType>(accuValue_buf[I0]);
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<dstDataType,
dstDataType,
dst1dDescType,
decltype(ReducedDataDesc),
Sequence<1>,
Sequence<0>,
0,
1,
1,
true>(dst1dDesc,
make_multi_index(warp_global_1d_id));
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> priorDstValue_buf;
threadwise_dst_load.Run(dst1dDesc,
dst_global_val_buf,
ReducedDataDesc,
make_tuple(I0),
priorDstValue_buf);
dstValue_buf(I0) += priorDstValue_buf[I0] * beta;
}
auto threadwise_dst_val_store =
ThreadwiseTensorSliceTransfer_v1r3<dstDataType,
dstDataType,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(dst1dDesc,
make_multi_index(warp_global_1d_id));
auto threadwise_dst_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<int,
int,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(dst1dDesc,
make_multi_index(warp_global_1d_id));
threadwise_dst_val_store.Run(
ReducedDataDesc, make_tuple(I0), dstValue_buf, dst1dDesc, dst_global_val_buf);
threadwise_dst_idx_store.Run(
ReducedDataDesc, make_tuple(I0), accuIndex_buf, dst1dDesc, dst_global_idx_buf);
}
};
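    // Run<3>: second-time reduction that takes already-reduced values together with their recorded
    // indices (e.g. a workspace written by an earlier multi-block pass) and reduces them further
    // into the final output value and index.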
template <>
__device__ static void Run<3>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
srcDataType alpha,
const srcDataType* const __restrict__ ws_values_global,
dstDataType beta,
dstDataType* const __restrict__ p_dst_global,
const int* const __restrict__ ws_indices_global,
int* const __restrict__ indices_global)
{
(void)origReduceLen;
const auto zeroVal = opReduce::GetReductionZeroVal();
const auto src_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
ws_values_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
const auto src_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
ws_indices_global, src2dDesc.GetElementSpaceSize());
auto dst_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_dst_global, dst1dDesc.GetElementSpaceSize());
auto dst_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
indices_global, dst1dDesc.GetElementSpaceSize());
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, GredAccessesPerThreadInWarp, true>
in_thread_val_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, int, GredAccessesPerThreadInWarp, true>
in_thread_idx_buf;
using warpwise_reduce = WarpReduceWithIndicesInput<decltype(in_thread_val_buf),
decltype(in_thread_idx_buf),
BlockSize,
opReduce,
nanPropaOpt>;
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, int, 1, true> accuIndex_buf;
accuValue_buf(I0) = zeroVal;
accuIndex_buf(I0) = 0;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
using ThreadBufferLengths = Sequence<1, GredAccessesPerThreadInWarp>;
constexpr auto ThreadBufferDesc = make_naive_tensor_descriptor_packed(
make_tuple(Number<1>{}, Number<GredAccessesPerThreadInWarp>{}));
index_t thread_global_1d_id = get_block_1d_id() * BlockSize + get_thread_local_1d_id();
index_t warp_global_1d_id = thread_global_1d_id / warpSize;
index_t thread_inwarp_id = thread_global_1d_id % warpSize;
auto threadwise_src_val_load = ThreadwiseTensorSliceTransfer_v2<srcDataType,
compType,
src2dDescType,
decltype(ThreadBufferDesc),
ThreadBufferLengths,
Sequence<0, 1>,
1,
1,
1,
false>(
src2dDesc,
make_multi_index(warp_global_1d_id, thread_inwarp_id * GredAccessesPerThreadInWarp));
auto threadwise_src_idx_load = ThreadwiseTensorSliceTransfer_v2<int,
int,
src2dDescType,
decltype(ThreadBufferDesc),
ThreadBufferLengths,
Sequence<0, 1>,
1,
1,
1,
false>(
src2dDesc,
make_multi_index(warp_global_1d_id, thread_inwarp_id * GredAccessesPerThreadInWarp));
constexpr auto in_thread_copy_step =
make_multi_index(0, warpSize * GredAccessesPerThreadInWarp);
for(index_t reducedLength = 0; reducedLength < toReduceLength;
reducedLength += warpSize * GredAccessesPerThreadInWarp)
{
threadwise_src_val_load.Run(src2dDesc,
src_global_val_buf,
ThreadBufferDesc,
make_tuple(I0, I0),
in_thread_val_buf);
threadwise_src_idx_load.Run(src2dDesc,
src_global_idx_buf,
ThreadBufferDesc,
make_tuple(I0, I0),
in_thread_idx_buf);
// do the warp-wise reduction on data of all thread buffers
warpwise_reduce::Reduce(
in_thread_val_buf, in_thread_idx_buf, accuValue_buf(I0), accuIndex_buf(I0));
threadwise_src_val_load.MoveSrcSliceWindow(src2dDesc, in_thread_copy_step);
threadwise_src_idx_load.MoveSrcSliceWindow(src2dDesc, in_thread_copy_step);
}
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
        // The first thread in the warp stores the reduced result to the global location
        // representing the warp
if(thread_inwarp_id == 0)
{
if(!float_equal_one{}(alpha))
accuValue_buf(I0) *= type_convert<compType>(alpha);
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> dstValue_buf;
dstValue_buf(I0) = type_convert<dstDataType>(accuValue_buf[I0]);
if(!float_equal_zero{}(beta))
{
auto threadwise_dst_load =
ThreadwiseTensorSliceTransfer_v2<dstDataType,
dstDataType,
dst1dDescType,
decltype(ReducedDataDesc),
Sequence<1>,
Sequence<0>,
0,
1,
1,
true>(dst1dDesc,
make_multi_index(warp_global_1d_id));
StaticBuffer<AddressSpaceEnum_t::Vgpr, dstDataType, 1, true> priorDstValue_buf;
threadwise_dst_load.Run(dst1dDesc,
dst_global_val_buf,
ReducedDataDesc,
make_tuple(I0),
priorDstValue_buf);
dstValue_buf(I0) += priorDstValue_buf[I0] * beta;
}
auto threadwise_dst_val_store =
ThreadwiseTensorSliceTransfer_v1r3<dstDataType,
dstDataType,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(dst1dDesc,
make_multi_index(warp_global_1d_id));
auto threadwise_dst_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<int,
int,
decltype(ReducedDataDesc),
dst1dDescType,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(dst1dDesc,
make_multi_index(warp_global_1d_id));
threadwise_dst_val_store.Run(
ReducedDataDesc, make_tuple(I0), dstValue_buf, dst1dDesc, dst_global_val_buf);
threadwise_dst_idx_store.Run(
ReducedDataDesc, make_tuple(I0), accuIndex_buf, dst1dDesc, dst_global_idx_buf);
}
};
};
} // namespace ck
#endif
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_GRIDWISE_GENERIC_2D_REDUCTION_MULTIBLOCK_HPP
#define CK_GRIDWISE_GENERIC_2D_REDUCTION_MULTIBLOCK_HPP
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_blockwise.hpp"
#include "blockwise_tensor_slice_transfer.hpp"
namespace ck {
template <index_t BlockSize,
typename srcDataType,
typename dstDataType, // not used together with the beta input
typename compType,
typename src2dDescType,
typename dst1dDescType,
ReduceTensorOp_t op,
NanPropagation_t nanPropaOpt,
ReduceTensorIndices_t reduceIndicesOpt,
index_t GredAccessesPerThreadInBlock>
struct GridwiseReduction_xy_to_x_multiblock
{
using opReduce = typename reduce_binary_operator<compType, op>::opType;
using preUnaryOpType = typename reduce_unary_operator<compType, op, true, false>::preUnaryOp;
using posUnaryOpType = typename reduce_unary_operator<compType, op, true, false>::posUnaryOp;
static constexpr auto buffer2dDesc = make_naive_tensor_descriptor_packed(
make_tuple(Number<GredAccessesPerThreadInBlock>{}, Number<BlockSize>{}));
using blockwise_reduce =
BlockwiseReduction_2d_block_buffer<decltype(buffer2dDesc), true, opReduce, nanPropaOpt>;
static constexpr index_t BlockBufferSize = buffer2dDesc.GetElementSize();
static constexpr auto I0 = Number<0>{};
template <int RunId>
__device__ static void Run(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
int BlkGroupSize,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
srcDataType* const __restrict__ ws_values_global,
int* const __restrict__ ws_indices_global);
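    // Run<1>: each block of a BlkGroupSize-sized block group reduces its chunk of the reduce
    // dimension for one output element and writes a single partial value into the workspace; a
    // follow-up kernel is expected to reduce the workspace down to the final output.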
template <>
__device__ static void Run<1>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
int BlkGroupSize,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
srcDataType* const __restrict__ ws_values_global,
int* const __restrict__ ws_indices_global)
{
(void)ws_indices_global;
(void)alpha; // unused
(void)beta; // unused
const auto zeroVal = opReduce::GetReductionZeroVal();
// LDS
__shared__ compType p_in_block_buffer[BlockBufferSize];
const auto src_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_src_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
auto workspace_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
ws_values_global, dst1dDesc.GetLength(I0) * BlkGroupSize);
auto in_block_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_in_block_buffer, BlockBufferSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
accuValue_buf(I0) = zeroVal;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
const int divider = origReduceLen;
const preUnaryOpType preUnaryOp(divider);
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_id = get_block_1d_id();
const index_t blkgroup_id = block_global_id / BlkGroupSize;
const index_t block_local_id = block_global_id % BlkGroupSize;
const index_t reduceSizePerBlock =
(((toReduceLength + BlkGroupSize - 1) / BlkGroupSize + BlockBufferSize - 1) /
BlockBufferSize) *
BlockBufferSize;
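        // The reduce dimension is split across BlkGroupSize blocks; each block's share is rounded
        // up to a multiple of BlockBufferSize so the block-wise copy steps stay aligned.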
constexpr auto in_block_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<1>{}, Number<BlockSize * GredAccessesPerThreadInBlock>{}));
using ThreadSliceLengths = Sequence<1, GredAccessesPerThreadInBlock>;
using ThreadClusterLengths = Sequence<1, BlockSize>;
auto blockwise_src_load = BlockwiseTensorSliceTransfer_v4<BlockSize,
InMemoryDataOperationEnum_t::Set,
Sequence<1, BlockBufferSize>,
ThreadSliceLengths,
ThreadClusterLengths,
Sequence<0, 1>,
srcDataType,
compType,
src2dDescType,
decltype(in_block_desc),
Sequence<0, 1>,
Sequence<0, 1>,
1,
1,
1,
1,
1,
1,
false,
true>(
src2dDesc,
make_multi_index(blkgroup_id, block_local_id * reduceSizePerBlock),
in_block_desc,
make_multi_index(0, 0));
constexpr auto in_block_copy_step = make_multi_index(0, BlockBufferSize);
const index_t toReduceBlocks = (reduceSizePerBlock + BlockSize - 1) / BlockSize;
for(index_t reducedBlocks = 0; reducedBlocks < toReduceBlocks;
reducedBlocks += GredAccessesPerThreadInBlock)
{
blockwise_src_load.RunRead(src2dDesc, src_global_buf);
blockwise_src_load.RunWrite(in_block_desc, in_block_buf);
__syncthreads();
// do element-wise pre-reduction operation
blockwise_reduce::operate_on_elements(preUnaryOp, in_block_buf);
index_t BlocksInOneOp = (reducedBlocks < toReduceBlocks - GredAccessesPerThreadInBlock)
? GredAccessesPerThreadInBlock
: toReduceBlocks - reducedBlocks;
blockwise_reduce::Reduce(in_block_buf, BlocksInOneOp, accuValue_buf(I0));
blockwise_src_load.MoveSrcSliceWindow(src2dDesc, in_block_copy_step);
}
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
const auto workspace_desc =
make_naive_tensor_descriptor_packed(make_tuple(dst1dDesc.GetLength(I0) * BlkGroupSize));
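        // The workspace is a flat 1-d buffer holding BlkGroupSize partial results per output
        // element; this block writes its partial value at position block_global_id.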
// The first thread in the block stores the reduced result to the global location
// representing the block
if(thread_local_id == 0)
{
auto threadwise_workspace_store =
ThreadwiseTensorSliceTransfer_v1r3<compType,
srcDataType,
decltype(ReducedDataDesc),
decltype(workspace_desc),
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(workspace_desc,
make_multi_index(block_global_id));
threadwise_workspace_store.Run(ReducedDataDesc,
make_tuple(I0),
accuValue_buf,
workspace_desc,
workspace_global_buf);
}
};
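    // Run<2>: same as Run<1>, but additionally tracks the global index of the element selected by
    // the reduction (for indexable operations such as MIN/MAX/AMAX) and stores the per-block
    // winning index into the indices workspace.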
template <>
__device__ static void Run<2>(const src2dDescType& src2dDesc,
const dst1dDescType& dst1dDesc,
int origReduceLen,
int BlkGroupSize,
srcDataType alpha,
const srcDataType* const __restrict__ p_src_global,
dstDataType beta,
srcDataType* const __restrict__ ws_values_global,
int* const __restrict__ ws_indices_global)
{
(void)alpha; // unused
(void)beta; // unused
const auto zeroVal = opReduce::GetReductionZeroVal();
// LDS
__shared__ compType p_in_block_values_buffer[BlockBufferSize];
__shared__ int p_in_block_indices_buffer[BlockBufferSize];
const auto src_global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_src_global, src2dDesc.GetElementSpaceSize(), type_convert<srcDataType>(zeroVal));
auto workspace_global_val_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
ws_values_global, dst1dDesc.GetLength(I0) * BlkGroupSize);
auto workspace_global_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
ws_indices_global, dst1dDesc.GetLength(I0) * BlkGroupSize);
auto in_block_val_buf =
make_dynamic_buffer<AddressSpaceEnum_t::Lds>(p_in_block_values_buffer, BlockBufferSize);
auto in_block_idx_buf = make_dynamic_buffer<AddressSpaceEnum_t::Lds>(
p_in_block_indices_buffer, BlockBufferSize);
StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 1, true> accuValue_buf;
StaticBuffer<AddressSpaceEnum_t::Vgpr, int, 1, true> accuIndex_buf;
accuValue_buf(I0) = zeroVal;
accuIndex_buf(I0) = 0;
const auto toReduceLength = src2dDesc.GetLength(Number<1>{});
const int divider = origReduceLen;
const preUnaryOpType preUnaryOp(divider);
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_id = get_block_1d_id();
const index_t blkgroup_id = block_global_id / BlkGroupSize;
const index_t block_local_id = block_global_id % BlkGroupSize;
const index_t reduceSizePerBlock =
(((toReduceLength + BlkGroupSize - 1) / BlkGroupSize + BlockBufferSize - 1) /
BlockBufferSize) *
BlockBufferSize;
constexpr auto in_block_desc = make_naive_tensor_descriptor_packed(
make_tuple(Number<1>{}, Number<BlockSize * GredAccessesPerThreadInBlock>{}));
using ThreadSliceLengths = Sequence<1, GredAccessesPerThreadInBlock>;
using ThreadClusterLengths = Sequence<1, BlockSize>;
auto blockwise_src_load = BlockwiseTensorSliceTransfer_v4<BlockSize,
InMemoryDataOperationEnum_t::Set,
Sequence<1, BlockBufferSize>,
ThreadSliceLengths,
ThreadClusterLengths,
Sequence<0, 1>,
srcDataType,
compType,
src2dDescType,
decltype(in_block_desc),
Sequence<0, 1>,
Sequence<0, 1>,
1,
1,
1,
1,
1,
1,
false,
true>(
src2dDesc,
make_multi_index(blkgroup_id, block_local_id * reduceSizePerBlock),
in_block_desc,
make_multi_index(0, 0));
constexpr auto in_block_copy_step = make_multi_index(0, BlockBufferSize);
const index_t toReduceBlocks = (reduceSizePerBlock + BlockSize - 1) / BlockSize;
int indexOffset = block_local_id * reduceSizePerBlock;
for(index_t reducedBlocks = 0; reducedBlocks < toReduceBlocks;
reducedBlocks += GredAccessesPerThreadInBlock)
{
blockwise_reduce::init_buffer_indices(in_block_idx_buf, indexOffset);
blockwise_src_load.RunRead(src2dDesc, src_global_buf);
blockwise_src_load.RunWrite(in_block_desc, in_block_val_buf);
__syncthreads();
            // apply the unary operation before reducing; AMAX needs it, while for MIN/MAX it is
            // effectively a no-op
blockwise_reduce::operate_on_elements(preUnaryOp, in_block_val_buf);
index_t BlocksInOneOp = (reducedBlocks < toReduceBlocks - GredAccessesPerThreadInBlock)
? GredAccessesPerThreadInBlock
: toReduceBlocks - reducedBlocks;
blockwise_reduce::Reduce2(in_block_val_buf,
in_block_idx_buf,
BlocksInOneOp,
accuValue_buf(I0),
accuIndex_buf(I0));
indexOffset += BlockBufferSize;
blockwise_src_load.MoveSrcSliceWindow(src2dDesc, in_block_copy_step);
}
constexpr auto ReducedDataDesc =
make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
const auto workspace_desc =
make_naive_tensor_descriptor_packed(make_tuple(dst1dDesc.GetLength(I0) * BlkGroupSize));
// The first thread in the block stores the reduced result to the global location
// representing the block
if(thread_local_id == 0)
{
auto threadwise_workspace_val_store =
ThreadwiseTensorSliceTransfer_v1r3<compType,
srcDataType,
decltype(ReducedDataDesc),
decltype(workspace_desc),
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(workspace_desc,
make_multi_index(block_global_id));
auto threadwise_workspace_idx_store =
ThreadwiseTensorSliceTransfer_v1r3<int,
int,
decltype(ReducedDataDesc),
decltype(workspace_desc),
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(workspace_desc,
make_multi_index(block_global_id));
threadwise_workspace_val_store.Run(ReducedDataDesc,
make_tuple(I0),
accuValue_buf,
workspace_desc,
workspace_global_val_buf);
threadwise_workspace_idx_store.Run(ReducedDataDesc,
make_tuple(I0),
accuIndex_buf,
workspace_desc,
workspace_global_idx_buf);
}
};
};
} // namespace ck
#endif
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_GRIDWISE_SET_BUFFER_VALUE_HPP
#define CK_GRIDWISE_SET_BUFFER_VALUE_HPP
#include "threadwise_tensor_slice_transfer.hpp"
namespace ck {
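// kernel_buffer_set_value fills a 1-d global buffer with a single value, one element per thread.
// A minimal host-side launch sketch (hypothetical variable names, shown only for illustration;
// the actual launch helpers used by the library may differ):
//
//   const auto desc = make_naive_tensor_descriptor_packed(make_tuple(buffer_len));
//   kernel_buffer_set_value<256, float, decltype(desc)>
//       <<<(buffer_len + 255) / 256, 256>>>(desc, p_buffer, 0.0f);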
template <index_t BlockSize, typename DataType, typename Grid1dBufferDescType>
__global__ void kernel_buffer_set_value(const Grid1dBufferDescType grid_1d_buffer_desc,
DataType* const __restrict__ p_global,
DataType value)
{
using PassThroughOp = tensor_operation::element_wise::UnaryIdentic<DataType, DataType>;
constexpr auto I0 = Number<0>{};
const index_t thread_local_id = get_thread_local_1d_id();
const index_t block_global_id = get_block_1d_id();
const index_t thread_global_id = block_global_id * BlockSize + thread_local_id;
StaticBuffer<AddressSpaceEnum_t::Vgpr, DataType, 1, true> value_buf;
value_buf(I0) = value;
constexpr auto val_buff_desc = make_naive_tensor_descriptor_packed(make_tuple(Number<1>{}));
auto global_buf = make_dynamic_buffer<AddressSpaceEnum_t::Global>(
p_global, grid_1d_buffer_desc.GetElementSpaceSize());
if(thread_global_id < grid_1d_buffer_desc.GetElementSize())
{
auto threadwise_store = ThreadwiseTensorSliceTransfer_v1r3<DataType,
DataType,
decltype(val_buff_desc),
Grid1dBufferDescType,
PassThroughOp,
Sequence<1>,
Sequence<0>,
0,
1,
InMemoryDataOperationEnum_t::Set,
1,
true>(
grid_1d_buffer_desc, make_multi_index(thread_global_id), PassThroughOp{});
threadwise_store.Run(
val_buff_desc, make_tuple(I0), value_buf, grid_1d_buffer_desc, global_buf);
}
};
} // namespace ck
#endif
@@ -30,240 +30,154 @@
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_binop.hpp"
#include "reduction_functions_accumulate.hpp"
namespace ck {
template <typename buffer2dDescType,
bool blockIsOneRow,
typename opReduce,
NanPropagation_t nanPropaOpt>
struct BlockwiseReduction_2d_block_buffer
template <typename Buffer1dDescType,
typename AccDataType,
index_t BlockSize,
index_t MThreadClusterSize,
index_t KThreadClusterSize,
bool ReorderThreadClusters,
typename OpReduce,
bool PropagateNan>
struct PartitionedBlockwiseReductionOn1dBuffer
{
using compType = typename opReduce::dataType;
static constexpr auto buffer_1d_desc = Buffer1dDescType{};
static constexpr auto buffer2dDesc = buffer2dDescType{};
    static_assert(BlockSize == MThreadClusterSize * KThreadClusterSize,
                  "The product of cluster lengths should be the same as BlockSize!");
    static_assert(KThreadClusterSize > 1,
                  "Parallel reduction needs to work on at least two elements!");
static constexpr index_t BlockSize =
blockIsOneRow ? buffer2dDesc.GetLength(Number<1>{}) : buffer2dDesc.GetLength(Number<0>{});
static constexpr index_t NumBlocks =
blockIsOneRow ? buffer2dDesc.GetLength(Number<0>{}) : buffer2dDesc.GetLength(Number<1>{});
using binop = detail::binop_with_nan_check<nanPropaOpt, opReduce, compType>;
static_assert(buffer_1d_desc.GetElementSize() == BlockSize,
"The buffer size should be the same as BlockSize!");
// This interface does not accumulate on indices
template <typename BufferType>
__device__ static void
Reduce(BufferType& block_buffer, index_t toReduceBlocks, compType& accuData)
{
const index_t thread_local_id = get_thread_local_1d_id();
compType lAccuData = opReduce::GetReductionZeroVal();
using Accumulation = detail::AccumulateWithNanCheck<PropagateNan, OpReduce, AccDataType>;
index_t offset;
for(index_t otherDimInd = 0; otherDimInd < toReduceBlocks; otherDimInd++)
template <typename BufferType>
__device__ static void Reduce(BufferType& block_buffer,
AccDataType& accuData,
index_t thread_m_cluster_id,
index_t thread_k_cluster_id)
{
offset = blockIsOneRow
? buffer2dDesc.CalculateOffset(make_tuple(otherDimInd, thread_local_id))
: buffer2dDesc.CalculateOffset(make_tuple(thread_local_id, otherDimInd));
compType opData = type_convert<compType>(block_buffer[offset]);
binop::calculate(lAccuData, opData);
}
offset = blockIsOneRow ? buffer2dDesc.CalculateOffset(make_tuple(0, thread_local_id))
: buffer2dDesc.CalculateOffset(make_tuple(thread_local_id, 0));
constexpr auto cluster_len_shift = get_shift<KThreadClusterSize>();
block_buffer(offset) = lAccuData;
static_for<0, cluster_len_shift, 1>{}([&](auto I) {
constexpr index_t indOffset = 1 << (cluster_len_shift - 1 - I());
__syncthreads();
for(index_t indOffset = BlockSize / 2; indOffset > 0; indOffset /= 2)
{
if(thread_local_id < indOffset)
if(thread_k_cluster_id < indOffset)
{
                // take the thread-cluster ordering into account so that contiguous locations are
                // accessed by threads with contiguous IDs
index_t offset1 =
blockIsOneRow ? buffer2dDesc.CalculateOffset(make_tuple(0, thread_local_id))
: buffer2dDesc.CalculateOffset(make_tuple(thread_local_id, 0));
index_t offset2 =
blockIsOneRow
? buffer2dDesc.CalculateOffset(make_tuple(0, thread_local_id + indOffset))
: buffer2dDesc.CalculateOffset(make_tuple(thread_local_id + indOffset, 0));
compType opData1 = type_convert<compType>(block_buffer[offset1]);
compType opData2 = type_convert<compType>(block_buffer[offset2]);
binop::calculate(opData1, opData2);
block_buffer(offset1) = type_convert<compType>(opData1);
ReorderThreadClusters
? buffer_1d_desc.CalculateOffset(make_tuple(
thread_k_cluster_id * MThreadClusterSize + thread_m_cluster_id))
: buffer_1d_desc.CalculateOffset(make_tuple(
thread_m_cluster_id * KThreadClusterSize + thread_k_cluster_id));
index_t offset2 = ReorderThreadClusters
? buffer_1d_desc.CalculateOffset(make_tuple(
(thread_k_cluster_id + indOffset) * MThreadClusterSize +
thread_m_cluster_id))
: buffer_1d_desc.CalculateOffset(
make_tuple(thread_m_cluster_id * KThreadClusterSize +
(thread_k_cluster_id + indOffset)));
AccDataType opData1 = type_convert<AccDataType>(block_buffer[offset1]);
AccDataType opData2 = type_convert<AccDataType>(block_buffer[offset2]);
Accumulation::Calculate(opData1, opData2);
block_buffer(offset1) = type_convert<AccDataType>(opData1);
}
__syncthreads();
}
});
if(thread_local_id == 0)
{
compType tmpVal = type_convert<compType>(block_buffer[0]);
index_t offset = ReorderThreadClusters
? buffer_1d_desc.CalculateOffset(make_tuple(thread_m_cluster_id))
: buffer_1d_desc.CalculateOffset(
make_tuple(thread_m_cluster_id * KThreadClusterSize));
binop::calculate(accuData, tmpVal);
}
accuData = type_convert<AccDataType>(block_buffer[offset]);
};
};
// This interface accumulates on both data values and indices
template <typename BufferType, typename IdxBufferType>
__device__ static void Reduce2(BufferType& block_buffer,
IdxBufferType& block_indices_buffer,
index_t toReduceBlocks,
compType& accuData,
int& accuIndex)
{
const index_t thread_local_id = get_thread_local_1d_id();
compType lAccuData = opReduce::GetReductionZeroVal();
int lAccuIndex = 0;
if constexpr(blockIsOneRow)
{
for(index_t otherDimInd = 0; otherDimInd < toReduceBlocks; otherDimInd++)
{
for(index_t indOffset = 1; indOffset < BlockSize; indOffset *= 2)
{
if(thread_local_id % (indOffset * 2) == 0)
{
index_t offset1 =
buffer2dDesc.CalculateOffset(make_tuple(otherDimInd, thread_local_id));
index_t offset2 = buffer2dDesc.CalculateOffset(
make_tuple(otherDimInd, thread_local_id + indOffset));
compType currVal1 = type_convert<compType>(block_buffer[offset1]);
compType currVal2 = type_convert<compType>(block_buffer[offset2]);
int currIndex1 = block_indices_buffer[offset1];
int currIndex2 = block_indices_buffer[offset2];
binop::calculate(currVal1, currVal2, currIndex1, currIndex2);
block_buffer(offset1) = type_convert<compType>(currVal1);
block_indices_buffer(offset1) = currIndex1;
}
__syncthreads();
}
}
if(thread_local_id == 0)
{
for(index_t otherDimInd = 0; otherDimInd < toReduceBlocks; otherDimInd++)
{
index_t offset = buffer2dDesc.CalculateOffset(make_tuple(otherDimInd, 0));
compType tmpVal = type_convert<compType>(block_buffer[offset]);
int tmpIndex = block_indices_buffer[offset];
binop::calculate(lAccuData, tmpVal, lAccuIndex, tmpIndex);
}
binop::calculate(accuData, lAccuData, accuIndex, lAccuIndex);
}
}
else
{
index_t offset;
for(index_t otherDimInd = 0; otherDimInd < toReduceBlocks; otherDimInd++)
{
offset = buffer2dDesc.CalculateOffset(make_tuple(thread_local_id, otherDimInd));
compType currVal = type_convert<compType>(block_buffer[offset]);
int currIndex = block_indices_buffer[offset];
binop::calculate(lAccuData, currVal, lAccuIndex, currIndex);
}
offset = buffer2dDesc.CalculateOffset(make_tuple(thread_local_id, 0));
block_buffer(offset) = lAccuData;
block_indices_buffer(offset) = lAccuIndex;
__syncthreads();
for(index_t indOffset = 1; indOffset < BlockSize; indOffset *= 2)
{
if(thread_local_id % (indOffset * 2) == 0)
{
index_t offset1 = buffer2dDesc.CalculateOffset(make_tuple(thread_local_id, 0));
index_t offset2 =
buffer2dDesc.CalculateOffset(make_tuple(thread_local_id + indOffset, 0));
template <typename Buffer1dDescType,
typename AccDataType,
typename IndexDataType,
index_t BlockSize,
index_t MThreadClusterSize,
index_t KThreadClusterSize,
bool ReorderThreadClusters,
typename OpReduce,
bool PropagateNan>
struct PartitionedBlockwiseReductionWithIndexOn1dBuffer
{
static constexpr auto buffer_1d_desc = Buffer1dDescType{};
compType currVal1 = type_convert<compType>(block_buffer[offset1]);
compType currVal2 = type_convert<compType>(block_buffer[offset2]);
int currIndex1 = block_indices_buffer[offset1];
int currIndex2 = block_indices_buffer[offset2];
    static_assert(BlockSize == MThreadClusterSize * KThreadClusterSize,
                  "The product of cluster lengths should be the same as BlockSize!");
    static_assert(KThreadClusterSize > 1,
                  "Parallel reduction needs to work on at least two elements!");
binop::calculate(currVal1, currVal2, currIndex1, currIndex2);
block_buffer(offset1) = type_convert<compType>(currVal1);
block_indices_buffer(offset1) = currIndex1;
}
static_assert(buffer_1d_desc.GetElementSize() == BlockSize,
"The buffer size should be the same as BlockSize!");
__syncthreads();
}
using Accumulation =
detail::AccumulateWithIndexAndNanCheck<PropagateNan, OpReduce, AccDataType, IndexDataType>;
if(thread_local_id == 0)
// This interface accumulates on both data values and indices
template <typename BufferType, typename IdxBufferType>
__device__ static void Reduce(BufferType& block_val_buffer,
IdxBufferType& block_idx_buffer,
AccDataType& accuData,
IndexDataType& accuIndex,
index_t thread_m_cluster_id,
index_t thread_k_cluster_id)
{
compType tmpVal = type_convert<compType>(block_buffer[0]);
int tmpIndex = block_indices_buffer[0];
binop::calculate(accuData, tmpVal, accuIndex, tmpIndex);
}
}
};
constexpr auto cluster_len_shift = get_shift<KThreadClusterSize>();
template <typename BufferType>
__device__ static void set_buffer_value(BufferType& block_buffer, compType value)
{
index_t thread_id = get_thread_local_1d_id();
static_for<0, cluster_len_shift, 1>{}([&](auto I) {
constexpr index_t indOffset = 1 << I();
for(index_t otherDimInd = 0; otherDimInd < NumBlocks; otherDimInd++)
if(thread_k_cluster_id % (indOffset * 2) == 0)
{
index_t offset = blockIsOneRow
? buffer2dDesc.CalculateOffset(make_tuple(otherDimInd, thread_id))
: buffer2dDesc.CalculateOffset(make_tuple(thread_id, otherDimInd));
block_buffer(offset) = value;
__syncthreads();
            // take the thread-cluster ordering into account so that contiguous locations are
            // accessed by threads with contiguous IDs
index_t offset1 =
ReorderThreadClusters
? buffer_1d_desc.CalculateOffset(make_tuple(
thread_k_cluster_id * MThreadClusterSize + thread_m_cluster_id))
: buffer_1d_desc.CalculateOffset(make_tuple(
thread_m_cluster_id * KThreadClusterSize + thread_k_cluster_id));
index_t offset2 = ReorderThreadClusters
? buffer_1d_desc.CalculateOffset(make_tuple(
(thread_k_cluster_id + indOffset) * MThreadClusterSize +
thread_m_cluster_id))
: buffer_1d_desc.CalculateOffset(
make_tuple(thread_m_cluster_id * KThreadClusterSize +
(thread_k_cluster_id + indOffset)));
AccDataType opData1 = type_convert<AccDataType>(block_val_buffer[offset1]);
AccDataType opData2 = type_convert<AccDataType>(block_val_buffer[offset2]);
IndexDataType currIndex1 = block_idx_buffer[offset1];
IndexDataType currIndex2 = block_idx_buffer[offset2];
Accumulation::Calculate(opData1, opData2, currIndex1, currIndex2);
block_val_buffer(offset1) = type_convert<AccDataType>(opData1);
block_idx_buffer(offset1) = currIndex1;
}
};
    // Initialize the block-wise indices buffer; the index for each element in the block-wise
    // data buffer is computed from its position in the buffer and the global starting index
template <typename IdxBufferType>
__device__ static void init_buffer_indices(IdxBufferType& block_indices_buffer, int indexStart)
{
index_t thread_id = get_thread_local_1d_id();
for(index_t otherDimInd = 0; otherDimInd < NumBlocks; otherDimInd++)
{
index_t offset = blockIsOneRow
? buffer2dDesc.CalculateOffset(make_tuple(otherDimInd, thread_id))
: buffer2dDesc.CalculateOffset(make_tuple(thread_id, otherDimInd));
block_indices_buffer(offset) = offset + indexStart;
__syncthreads();
}
};
// Execute unary operation on the block buffer elements
template <typename unary_op_type, typename BufferType>
__device__ static void operate_on_elements(unary_op_type& unary_op, BufferType& block_buffer)
{
index_t thread_id = get_thread_local_1d_id();
for(index_t otherDimInd = 0; otherDimInd < NumBlocks; otherDimInd++)
{
index_t offset = blockIsOneRow
? buffer2dDesc.CalculateOffset(make_tuple(otherDimInd, thread_id))
: buffer2dDesc.CalculateOffset(make_tuple(thread_id, otherDimInd));
});
block_buffer(offset) = unary_op(block_buffer[offset]);
index_t offset = ReorderThreadClusters
? buffer_1d_desc.CalculateOffset(make_tuple(thread_m_cluster_id))
: buffer_1d_desc.CalculateOffset(
make_tuple(thread_m_cluster_id * KThreadClusterSize));
__syncthreads();
accuData = type_convert<AccDataType>(block_val_buffer[offset]);
accuIndex = block_idx_buffer[offset];
}
};
};
}; // end of namespace ck
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_REDUCTION_FUNCTIONS_THREADWISE_HPP
#define CK_REDUCTION_FUNCTIONS_THREADWISE_HPP
#include "data_type.hpp"
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_binop.hpp"
namespace ck {
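// ThreadReduce sequentially folds the elements of a per-thread StaticBuffer into a single
// accumulated value (Reduce) or into a value/index pair (Reduce2). A usage sketch (illustrative
// only; buf, acc, opReduce and nanPropaOpt stand for whatever the caller has in scope):
//
//   StaticBuffer<AddressSpaceEnum_t::Vgpr, compType, 8, true> buf;
//   compType acc = opReduce::GetReductionZeroVal();
//   ThreadReduce<decltype(buf), opReduce, nanPropaOpt>::Reduce(buf, acc);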
template <typename BufferType, typename opReduce, NanPropagation_t nanPropaOpt>
struct ThreadReduce
{
using compType = typename opReduce::dataType;
static_assert(BufferType::IsStaticBuffer(), "Thread-wise reduction needs use StaticBuffer!");
static_assert(
std::is_same<typename BufferType::type, compType>::value,
"Data type of StaticBuffer for Thread-wise reduction should be same as the compType!");
static constexpr index_t ThreadBufferLen = BufferType::Size();
using binop = detail::binop_with_nan_check<nanPropaOpt, opReduce, compType>;
// This interface does not accumulate on indices
__device__ static void Reduce(const BufferType& thread_buffer, compType& accuData)
{
static_for<0, ThreadBufferLen, 1>{}(
[&](auto I) { binop::calculate(accuData, thread_buffer[I]); });
};
    // This interface accumulates on both data values and indices and is called by the
    // Direct_ThreadWise reduction method for the first-time reduction
__device__ static void
Reduce2(const BufferType& thread_buffer, compType& accuData, int& accuIndex, int indexStart)
{
static_for<0, ThreadBufferLen, 1>{}([&](auto I) {
int currIndex = I + indexStart;
binop::calculate(accuData, thread_buffer[I], accuIndex, currIndex);
});
};
// Set the elements in the per-thread buffer to a specific value
// cppcheck-suppress constParameter
__device__ static void set_buffer_value(BufferType& thread_buffer, compType value)
{
static_for<0, ThreadBufferLen, 1>{}([&](auto I) { thread_buffer(I) = value; });
};
// Execute unary operation on the per-thread buffer elements
template <typename unary_op_type>
__device__ static void operate_on_elements(unary_op_type& unary_op, BufferType& thread_buffer)
{
static_for<0, ThreadBufferLen, 1>{}(
[&](auto I) { thread_buffer(I) = unary_op(thread_buffer[I]); });
};
};
template <typename BufferType,
typename IdxBufferType,
typename opReduce,
NanPropagation_t nanPropaOpt>
struct ThreadReduceWithIndicesInput
{
using compType = typename opReduce::dataType;
static_assert(BufferType::IsStaticBuffer(), "Thread-wise reduction needs use StaticBuffer!");
static_assert(IdxBufferType::IsStaticBuffer(),
"Thread-wise reduction needs use StaticBuffer for indices!");
static_assert(
std::is_same<typename BufferType::type, compType>::value,
"Data type of StaticBuffer for Thread-wise reduction should be same as the compType!");
static_assert(std::is_same<typename IdxBufferType::type, index_t>::value,
"Indices type of StaticBuffer for Thread-wise reduction should be index_t!");
static_assert(BufferType::Size() == IdxBufferType::Size(),
"StaticBuffers for data and indices should have the same sizes!");
static constexpr index_t ThreadBufferLen = BufferType::Size();
using binop = detail::binop_with_nan_check<nanPropaOpt, opReduce, compType>;
    // This interface accumulates on both data values and indices and is called by the
    // Direct_ThreadWise reduction method for the second-time reduction
__device__ static void Reduce(const BufferType& thread_buffer,
const IdxBufferType& thread_indices_buffer,
compType& accuData,
int& accuIndex)
{
static_for<0, ThreadBufferLen, 1>{}([&](auto I) {
binop::calculate(accuData, thread_buffer[I], accuIndex, thread_indices_buffer[I]);
});
};
// Set the elements in the per-thread buffer to a specific value
// cppcheck-suppress constParameter
__device__ static void set_buffer_value(BufferType& thread_buffer, compType value)
{
static_for<0, ThreadBufferLen, 1>{}([&](auto I) { thread_buffer(I) = value; });
};
// Execute unary operation on the per-thread buffer elements
template <typename unary_op_type>
__device__ static void operate_on_elements(unary_op_type& unary_op, BufferType& thread_buffer)
{
static_for<0, ThreadBufferLen, 1>{}(
[&](auto I) { thread_buffer(I) = unary_op(thread_buffer[I]); });
};
};
}; // end of namespace ck
#endif
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef CK_REDUCTION_FUNCTIONS_WARPWISE_HPP
#define CK_REDUCTION_FUNCTIONS_WARPWISE_HPP
#include "data_type.hpp"
#include "reduction_common.hpp"
#include "reduction_operator.hpp"
#include "reduction_functions_binop.hpp"
namespace ck {
template <typename BufferType, index_t BlockSize, typename opReduce, NanPropagation_t nanPropaOpt>
struct WarpReduce
{
using compType = typename opReduce::dataType;
using binop = detail::binop_with_nan_check<nanPropaOpt, opReduce, compType>;
static_assert(BufferType::IsStaticBuffer(),
"Per-thread buffer for WarpWise reduction should be StaticBuffer!");
static_assert(std::is_same<typename BufferType::type, compType>::value,
"Data type of per-thread StaticBuffer for WarpWise reduction should be same as "
"the compType!");
static constexpr index_t ThreadBufferLen = BufferType::Size();
static constexpr bool have_builtin_shuffle =
std::is_same<compType, float>::value || std::is_same<compType, double>::value;
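    // __shfl_down is only used when compType is float or double; other types (e.g. fp16) fall
    // back to the LDS-based implementation below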
// This interface does not accumulate on indices
__device__ static void Reduce(const BufferType& thread_buffer, compType& accuData)
{
if constexpr(have_builtin_shuffle)
ReduceImpl1(thread_buffer, accuData);
else
ReduceImpl2(thread_buffer, accuData);
};
// This interface implementation uses HIP built-in device shuffling functions
__device__ static void ReduceImpl1(const BufferType& thread_buffer, compType& accuData)
{
compType lAccuData = opReduce::GetReductionZeroVal();
static_for<0, ThreadBufferLen, 1>{}(
[&](auto I) { binop::calculate(lAccuData, thread_buffer[I]); });
// synchronize among all threads in this warp
__all(1);
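        // tree reduction within the warp: each step folds the value held by the lane `stride`
        // positions higher onto the current lane via __shfl_down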
for(index_t stride = warpSize / 2; stride > 0; stride /= 2)
{
compType tmpVal = __shfl_down(lAccuData, stride, warpSize);
binop::calculate(lAccuData, tmpVal);
__all(1);
}
binop::calculate(accuData, lAccuData);
};
    // This implementation does not use the HIP built-in device shuffle functions, because HIP
    // does not provide them for fp16
__device__ static void ReduceImpl2(const BufferType& thread_buffer, compType& accuData)
{
compType lAccuData = opReduce::GetReductionZeroVal();
static_for<0, ThreadBufferLen, 1>{}(
[&](auto I) { binop::calculate(lAccuData, thread_buffer[I]); });
__syncthreads();
index_t thread_id = get_thread_local_1d_id();
index_t warpId = thread_id / warpSize;
index_t thread_inwarp_id = thread_id % warpSize;
__shared__ compType shuffle_buffer[BlockSize];
compType* myBuffer = &shuffle_buffer[warpId * warpSize];
myBuffer[thread_inwarp_id] = lAccuData;
__syncthreads();
for(index_t stride = warpSize / 2; stride > 0; stride /= 2)
{
if(thread_inwarp_id < stride)
{
compType currVal1 = myBuffer[thread_inwarp_id];
compType currVal2 = myBuffer[thread_inwarp_id + stride];
binop::calculate(currVal1, currVal2);
myBuffer[thread_inwarp_id] = currVal1;
}
__syncthreads();
}
if(thread_inwarp_id == 0)
binop::calculate(accuData, myBuffer[0]);
};
    // This interface accumulates on both data values and indices and is called by the
    // Direct_WarpWise reduction method for the first-time reduction
__device__ static void
Reduce2(const BufferType& thread_buffer, compType& accuData, int& accuIndex, int indexStart)
{
if constexpr(have_builtin_shuffle)
Reduce2Impl1(thread_buffer, accuData, accuIndex, indexStart);
else
Reduce2Impl2(thread_buffer, accuData, accuIndex, indexStart);
};
// This interface implementation uses HIP built-in device shuffling functions
__device__ static void Reduce2Impl1(const BufferType& thread_buffer,
compType& accuData,
int& accuIndex,
int indexStart)
{
compType lAccuData = opReduce::GetReductionZeroVal();
int lAccuIndex = 0;
index_t thread_inwarp_id = get_thread_local_1d_id() % warpSize;
static_for<0, ThreadBufferLen, 1>{}([&](auto I) {
int currIndex = thread_inwarp_id * ThreadBufferLen + I + indexStart;
binop::calculate(lAccuData, thread_buffer[I], lAccuIndex, currIndex);
});
// synchronize among all threads in this warp
__all(1);
for(index_t stride = 1; stride < warpSize; stride *= 2)
{
compType tmpVal = __shfl_down(lAccuData, stride, warpSize);
int tmpIndex = __shfl_down(lAccuIndex, stride, warpSize);
binop::calculate(lAccuData, tmpVal, lAccuIndex, tmpIndex);
__all(1);
}
if(thread_inwarp_id == 0)
binop::calculate(accuData, lAccuData, accuIndex, lAccuIndex);
};
    // This implementation does not use the HIP built-in device shuffle functions, because HIP
    // does not provide them for fp16
__device__ static void Reduce2Impl2(const BufferType& thread_buffer,
compType& accuData,
int& accuIndex,
int indexStart)
{
compType lAccuData = opReduce::GetReductionZeroVal();
int lAccuIndex = 0;
index_t thread_id = get_thread_local_1d_id();
index_t warpId = thread_id / warpSize;
index_t thread_inwarp_id = thread_id % warpSize;
static_for<0, ThreadBufferLen, 1>{}([&](auto I) {
int currIndex = thread_inwarp_id * ThreadBufferLen + I + indexStart;
binop::calculate(lAccuData, thread_buffer[I], lAccuIndex, currIndex);
});
__shared__ compType shuffle_data_buffer[BlockSize];
__shared__ int shuffle_indices_buffer[BlockSize];
compType* myDataBuffer = &shuffle_data_buffer[warpId * warpSize];
int* myIndicesBuffer = &shuffle_indices_buffer[warpId * warpSize];
myDataBuffer[thread_inwarp_id] = lAccuData;
myIndicesBuffer[thread_inwarp_id] = lAccuIndex;
__syncthreads();
for(index_t stride = 1; stride < warpSize; stride *= 2)
{
compType currVal1 = myDataBuffer[thread_inwarp_id];
compType currVal2 = myDataBuffer[thread_inwarp_id + stride];
int currIndex1 = myIndicesBuffer[thread_inwarp_id];
int currIndex2 = myIndicesBuffer[thread_inwarp_id + stride];
binop::calculate(currVal1, currVal2, currIndex1, currIndex2);
myDataBuffer[thread_inwarp_id] = currVal1;
myIndicesBuffer[thread_inwarp_id] = currIndex1;
__syncthreads();
}
if(thread_inwarp_id == 0)
binop::calculate(accuData, myDataBuffer[0], accuIndex, myIndicesBuffer[0]);
};
// cppcheck-suppress constParameter
__device__ static void set_buffer_value(BufferType& thread_buffer, compType value)
{
static_for<0, ThreadBufferLen, 1>{}([&](auto I) { thread_buffer(I) = value; });
__all(1);
};
// Execute unary operation on the per-thread buffer elements
template <typename unary_op_type>
__device__ static void operate_on_elements(unary_op_type& unary_op, BufferType& thread_buffer)
{
static_for<0, ThreadBufferLen, 1>{}(
[&](auto I) { thread_buffer(I) = unary_op(thread_buffer[I]); });
__all(1);
};
};
template <typename BufferType,
typename IdxBufferType,
index_t BlockSize,
typename opReduce,
NanPropagation_t nanPropaOpt>
struct WarpReduceWithIndicesInput
{
using compType = typename opReduce::dataType;
using binop = detail::binop_with_nan_check<nanPropaOpt, opReduce, compType>;
static_assert(BufferType::IsStaticBuffer(),
"Per-thread buffer for WarpWise reduction should be StaticBuffer!");
static_assert(IdxBufferType::IsStaticBuffer(),
"Per-thread buffer for WarpWise reduction should be StaticBuffer for indices!");
static_assert(std::is_same<typename BufferType::type, compType>::value,
"Data type of per-thread StaticBuffer for WarpWise reduction should be same as "
"the compType!");
static_assert(
std::is_same<typename IdxBufferType::type, index_t>::value,
"Indices type per-thread of StaticBuffer for WarpWise reduction should be index_t!");
static_assert(BufferType::Size() == IdxBufferType::Size(),
"StaticBuffers for data and indices should have the same sizes!");
static constexpr index_t ThreadBufferLen = BufferType::Size();
static constexpr bool have_builtin_shuffle =
std::is_same<compType, float>::value || std::is_same<compType, double>::value;
    // This interface accumulates on both data values and indices and is called by the
    // Direct_WarpWise reduction method for the second-time reduction
__device__ static void Reduce(const BufferType& thread_buffer,
const IdxBufferType& thread_indices_buffer,
compType& accuData,
int& accuIndex)
{
if constexpr(have_builtin_shuffle)
ReduceImpl1(thread_buffer, thread_indices_buffer, accuData, accuIndex);
else
ReduceImpl2(thread_buffer, thread_indices_buffer, accuData, accuIndex);
};
// This interface implementation uses HIP built-in device shuffling functions
__device__ static void ReduceImpl1(const BufferType& thread_buffer,
const IdxBufferType& thread_indices_buffer,
compType& accuData,
int& accuIndex)
{
compType lAccuData = opReduce::GetReductionZeroVal();
int lAccuIndex = 0;
static_for<0, ThreadBufferLen, 1>{}([&](auto I) {
binop::calculate(lAccuData, thread_buffer[I], lAccuIndex, thread_indices_buffer[I]);
});
// synchronize among all threads in this warp
__all(1);
for(index_t stride = 1; stride < warpSize; stride *= 2)
{
compType tmpVal = __shfl_down(lAccuData, stride, warpSize);
int tmpIndex = __shfl_down(lAccuIndex, stride, warpSize);
binop::calculate(lAccuData, tmpVal, lAccuIndex, tmpIndex);
__all(1);
}
binop::calculate(accuData, lAccuData, accuIndex, lAccuIndex);
};
    // This implementation does not use the HIP built-in device shuffle functions, because HIP
    // does not provide them for fp16
__device__ static void ReduceImpl2(const BufferType& thread_buffer,
const IdxBufferType& thread_indices_buffer,
compType& accuData,
int& accuIndex)
{
compType lAccuData = opReduce::GetReductionZeroVal();
int lAccuIndex = 0;
index_t thread_id = get_thread_local_1d_id();
index_t warpId = thread_id / warpSize;
index_t thread_inwarp_id = thread_id % warpSize;
static_for<0, ThreadBufferLen, 1>{}([&](auto I) {
binop::calculate(lAccuData, thread_buffer[I], lAccuIndex, thread_indices_buffer[I]);
});
__shared__ compType shuffle_data_buffer[BlockSize];
__shared__ int shuffle_indices_buffer[BlockSize];
compType* myDataBuffer = &shuffle_data_buffer[warpId * warpSize];
int* myIndicesBuffer = &shuffle_indices_buffer[warpId * warpSize];
myDataBuffer[thread_inwarp_id] = lAccuData;
myIndicesBuffer[thread_inwarp_id] = lAccuIndex;
__syncthreads();
for(index_t stride = 1; stride < warpSize; stride *= 2)
{
compType currVal1 = myDataBuffer[thread_inwarp_id];
compType currVal2 = myDataBuffer[thread_inwarp_id + stride];
int currIndex1 = myIndicesBuffer[thread_inwarp_id];
int currIndex2 = myIndicesBuffer[thread_inwarp_id + stride];
binop::calculate(currVal1, currVal2, currIndex1, currIndex2);
myDataBuffer[thread_inwarp_id] = currVal1;
myIndicesBuffer[thread_inwarp_id] = currIndex1;
__syncthreads();
}
if(thread_inwarp_id == 0)
binop::calculate(accuData, myDataBuffer[0], accuIndex, myIndicesBuffer[0]);
};
// cppcheck-suppress constParameter
__device__ static void set_buffer_value(BufferType& thread_buffer, compType value)
{
static_for<0, ThreadBufferLen, 1>{}([&](auto I) { thread_buffer(I) = value; });
__all(1);
};
// Execute unary operation on the per-thread buffer elements
template <typename unary_op_type>
__device__ static void operate_on_elements(unary_op_type& unary_op, BufferType& thread_buffer)
{
static_for<0, ThreadBufferLen, 1>{}(
[&](auto I) { thread_buffer(I) = unary_op(thread_buffer[I]); });
__all(1);
};
};
}; // end of namespace ck
#endif
#ifndef CK_MATH_V2_HPP
#define CK_MATH_V2_HPP
#include "data_type.hpp"
namespace ck {
namespace math {
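// half_t overloads so templated code can call abs/sqrtf/isnan on fp16 values without
// special-casing the type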
static inline __device__ half_t abs(half_t x) { return __habs(x); };
static inline __device__ half_t sqrtf(half_t x) { return hsqrt(x); };
static inline __device__ bool isnan(half_t x) { return __hisnan(x); };
} // namespace math
} // namespace ck
#endif
@@ -48,6 +48,18 @@ struct float_equal_zero
};
};
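// get_shift<N>() returns log2(N) for power-of-two N (e.g. get_shift<64>() == 6); the block-wise
// reduction uses it to determine the number of halving steps in its reduction tree.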
template <index_t N>
static constexpr __device__ index_t get_shift()
{
return (get_shift<N / 2>() + 1);
};
template <>
constexpr __device__ index_t get_shift<1>()
{
return (0);
}
}; // end of namespace ck
#endif
@@ -34,50 +34,79 @@
namespace ck {
namespace detail {
static inline __device__ bool isnan(half_t x) { return __hisnan(x); };
template <typename T>
static inline __device__ bool is_nan(T x)
{
return (isnan(x));
};
template <NanPropagation_t nanPropaOpt, typename opReduce, typename compType>
struct binop_with_nan_check;
template <>
inline __device__ bool is_nan<half_t>(half_t x)
{
return (__hisnan(x));
};
template <typename opReduce, typename compType>
struct binop_with_nan_check<NanPropagation_t::NOT_PROPAGATE_NAN, opReduce, compType>
template <bool PropagateNan, typename ReduceOperation, typename AccDataType>
struct AccumulateWithNanCheck;
template <typename ReduceOperation, typename AccDataType>
struct AccumulateWithNanCheck<false, ReduceOperation, AccDataType>
{
// cppcheck-suppress constParameter
__device__ static inline void calculate(compType& accuVal, compType currVal)
__device__ static inline void Calculate(AccDataType& accuVal, AccDataType currVal)
{
opReduce{}(accuVal, currVal);
ReduceOperation{}(accuVal, currVal);
};
};
// The method is called when the opReduce is indexable and the user asked for indices
template <typename ReduceOperation, typename AccDataType>
struct AccumulateWithNanCheck<true, ReduceOperation, AccDataType>
{
__device__ static inline void Calculate(AccDataType& accuVal, AccDataType currVal)
{
if(is_nan(currVal))
{
accuVal = currVal;
}
else
{
ReduceOperation{}(accuVal, currVal);
};
};
};
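// For the PropagateNan == true specializations the accumulation is "sticky" on NaN: as soon as a
// NaN value is seen it is copied into accuVal (and, where applicable, its index into accuIndex),
// so the final result reports the NaN instead of silently ignoring it.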
template <bool PropagateNan, typename ReduceOperation, typename AccDataType, typename IndexDataType>
struct AccumulateWithIndexAndNanCheck;
template <typename ReduceOperation, typename AccDataType, typename IndexDataType>
struct AccumulateWithIndexAndNanCheck<false, ReduceOperation, AccDataType, IndexDataType>
{
__device__ static inline void
// cppcheck-suppress constParameter
calculate(compType& accuVal, compType currVal, int& accuIndex, int currIndex)
Calculate(AccDataType& accuVal,
AccDataType currVal,
IndexDataType& accuIndex,
IndexDataType currIndex)
{
bool changed = false;
opReduce{}(accuVal, currVal, changed);
ReduceOperation{}(accuVal, currVal, changed);
if(changed)
accuIndex = currIndex;
};
};
template <typename opReduce, typename compType>
struct binop_with_nan_check<NanPropagation_t::PROPAGATE_NAN, opReduce, compType>
template <typename ReduceOperation, typename AccDataType, typename IndexDataType>
struct AccumulateWithIndexAndNanCheck<true, ReduceOperation, AccDataType, IndexDataType>
{
__device__ static inline void calculate(compType& accuVal, compType currVal)
{
if(isnan(currVal))
accuVal = currVal;
else
opReduce{}(accuVal, currVal);
};
// The method is called when the opReduce is indexable and the user asked for indices
__device__ static inline void
calculate(compType& accuVal, compType currVal, int& accuIndex, int currIndex)
// The method is called when the ReduceOperation is indexable and the user asked for indices
__device__ static inline void Calculate(AccDataType& accuVal,
AccDataType currVal,
IndexDataType& accuIndex,
IndexDataType currIndex)
{
if(isnan(currVal))
if(is_nan(currVal))
{
accuVal = currVal;
accuIndex = currIndex;
@@ -86,7 +115,7 @@ struct binop_with_nan_check<NanPropagation_t::PROPAGATE_NAN, opReduce, compType>
{
bool changed = false;
opReduce{}(accuVal, currVal, changed);
ReduceOperation{}(accuVal, currVal, changed);
if(changed)
accuIndex = currIndex;
@@ -26,7 +26,7 @@
#ifndef CK_REDUCTION_OPERATOR_HPP
#define CK_REDUCTION_OPERATOR_HPP
#include "reduction_common.hpp"
#include "common_header.hpp"
namespace ck {
@@ -60,11 +60,9 @@ struct Add
{
using dataType = T;
__device__ static constexpr T GetReductionZeroVal() { return static_cast<T>(0.0f); };
__host__ __device__ static constexpr T GetReductionZeroVal() { return static_cast<T>(0.0f); };
__device__ inline constexpr void operator()(T& a, T b) const { a = a + b; }
static constexpr bool indexable = false;
__host__ __device__ inline constexpr void operator()(T& a, T b) const { a = a + b; }
};
template <class T>
@@ -72,11 +70,9 @@ struct Mul
{
using dataType = T;
__device__ static constexpr T GetReductionZeroVal() { return static_cast<T>(1.0f); };
__device__ inline constexpr void operator()(T& a, T b) const { a = a * b; }
__host__ __device__ static constexpr T GetReductionZeroVal() { return static_cast<T>(1.0f); };
static constexpr bool indexable = false;
__host__ __device__ inline constexpr void operator()(T& a, T b) const { a = a * b; }
};
template <class T>
@@ -84,15 +80,18 @@ struct Max
{
using dataType = T;
__device__ static constexpr T GetReductionZeroVal() { return NumericLimits<T>::Lowest(); };
__host__ __device__ static constexpr T GetReductionZeroVal()
{
return NumericLimits<T>::Lowest();
};
__device__ inline constexpr void operator()(T& a, T b) const
__host__ __device__ inline constexpr void operator()(T& a, T b) const
{
if(a < b)
a = b;
}
__device__ inline constexpr void operator()(T& a, T b, bool& changed) const
__host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
{
if(a < b)
{
@@ -100,8 +99,6 @@ struct Max
changed = true;
}
}
static constexpr bool indexable = true;
};
template <class T>
@@ -109,15 +106,18 @@ struct Min
{
using dataType = T;
__host__ __device__ static constexpr T GetReductionZeroVal()
{
return NumericLimits<T>::Max();
};
__host__ __device__ inline constexpr void operator()(T& a, T b) const
{
if(a > b)
a = b;
}
__host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
{
if(a > b)
{
......@@ -125,8 +125,6 @@ struct Min
changed = true;
}
}
static constexpr bool indexable = true;
};
template <class T>
......@@ -134,15 +132,15 @@ struct AMax
{
using dataType = T;
__host__ __device__ static constexpr T GetReductionZeroVal() { return static_cast<T>(0.0f); };
__host__ __device__ inline constexpr void operator()(T& a, T b) const
{
if(a < b)
a = b;
}
__host__ __device__ inline constexpr void operator()(T& a, T b, bool& changed) const
{
if(a < b)
{
......@@ -150,270 +148,10 @@ struct AMax
changed = true;
}
}
static constexpr bool indexable = true;
};
// Unary operators are usually applied element-wise before the reduction is executed on the
// elements.
// They are needed for easy implementation of reduction types such as AVG, NRM1 and NRM2.
template <class T, bool hasDividing>
struct unary_identic
{
__device__ unary_identic(const int divider = 1)
{
scaler = 1.0f / static_cast<float>(divider);
};
__device__ inline constexpr T operator()(T a) const { return a * type_convert<T>(scaler); };
float scaler = 1.0f;
};
template <class T>
struct unary_identic<T, false>
{
__device__ unary_identic(const int divider = 1) { (void)divider; };
__device__ inline constexpr T operator()(T a) const { return a; };
};
template <class T, bool hasDividing>
struct unary_square
{
__device__ unary_square(const int divider = 1) { scaler = 1.0f / static_cast<float>(divider); };
__device__ inline constexpr T operator()(T a) const
{
a = a * a;
return a * type_convert<T>(scaler);
};
float scaler = 1.0f;
};
template <class T>
struct unary_square<T, false>
{
__device__ unary_square(const int divider = 1) { (void)divider; };
__device__ inline constexpr T operator()(T a) const { return a * a; };
};
template <class T, bool hasDividing>
struct unary_abs
{
__device__ unary_abs(const int divider = 1) { scaler = 1.0f / static_cast<float>(divider); };
__device__ inline constexpr T operator()(T a) const
{
a = abs(a);
return a * type_convert<T>(scaler);
};
float scaler = 1.0f;
};
template <class T>
struct unary_abs<T, false>
{
__device__ unary_abs(const int divider = 1) { (void)divider; };
__device__ inline constexpr T operator()(T a) const { return abs(a); };
};
// We know for sure that HIP 4.0 provides __habs(), but 3.0 does not.
// Let's assume that __habs() has existed since 3.5.
#if HIP_PACKAGE_VERSION_FLAT < 3005000000
inline __device__ __half __habs(__half x)
{
union
{
__half half;
unsigned short u16;
} val;
val.half = x;
val.u16 = val.u16 & 0x7fff;
return val.half;
}
#endif
template <bool hasDividing>
struct unary_abs<half_t, hasDividing>
{
__device__ unary_abs(const int divider = 1) { scaler = 1.0f / static_cast<float>(divider); };
__device__ inline half_t operator()(half_t a) const
{
a = static_cast<half_t>(__habs(a));
return a * type_convert<half_t>(scaler);
};
float scaler = 1.0f;
};
template <>
struct unary_abs<half_t, false>
{
__device__ unary_abs(const int divider = 1) { (void)divider; };
__device__ inline half_t operator()(half_t a) const { return static_cast<half_t>(__habs(a)); };
};
template <class T>
struct unary_sqrt
{
__device__ unary_sqrt(const int divider = 1) { (void)divider; };
__device__ inline T operator()(T a) const { return sqrtf(a); };
};
template <>
struct unary_sqrt<half_t>
{
__device__ unary_sqrt(const int divider = 1) { (void)divider; };
__device__ inline half_t operator()(half_t a) const { return static_cast<half_t>(hsqrt(a)); };
};
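// Illustrative host-side sketch (not part of the original commit): a NORM2 reduction
// decomposes into a pre-unary op (square), the Add reduce op and a post-unary op (sqrt),
// mirroring unary_square / Add / unary_sqrt above. __builtin_sqrtf is used only to keep
// the example free of extra includes; the name norm2_of is hypothetical.
namespace doc_example {
inline float norm2_of(const float* x, int n)
{
    float acc = 0.0f; // Add::GetReductionZeroVal()
    for(int i = 0; i < n; ++i)
        acc += x[i] * x[i]; // pre-unary (square) combined with the Add reduce op
    return __builtin_sqrtf(acc); // post-unary (sqrt)
}
} // namespace doc_example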
}; // end of namespace reduce
// The templated struct reduce_binary_operator maps the enum Ids of binary operators to their
// respective functor classes.
// The "GetReductionZeroVal()" interface and the boolean member "indexable" are also provided in
// reduce_binary_operator for easier checking by the upper-layer codes in the kernels.
template <typename T, ReduceTensorOp_t op>
struct reduce_binary_operator;
template <typename T>
struct reduce_binary_operator<T, ReduceTensorOp_t::ADD>
{
using opType = reduce::Add<T>;
using dataType = T;
static constexpr bool indexable = reduce::Add<T>::indexable;
};
template <typename T>
struct reduce_binary_operator<T, ReduceTensorOp_t::MUL>
{
using opType = reduce::Mul<T>;
using dataType = T;
static constexpr bool indexable = reduce::Mul<T>::indexable;
};
template <typename T>
struct reduce_binary_operator<T, ReduceTensorOp_t::MIN>
{
using opType = reduce::Min<T>;
using dataType = T;
static constexpr bool indexable = reduce::Min<T>::indexable;
};
template <typename T>
struct reduce_binary_operator<T, ReduceTensorOp_t::MAX>
{
using opType = reduce::Max<T>;
using dataType = T;
static constexpr bool indexable = reduce::Max<T>::indexable;
};
template <typename T>
struct reduce_binary_operator<T, ReduceTensorOp_t::AMAX>
{
using opType = reduce::AMax<T>;
using dataType = T;
static constexpr bool indexable = reduce::AMax<T>::indexable;
};
template <typename T>
struct reduce_binary_operator<T, ReduceTensorOp_t::AVG>
{
using opType = reduce::Add<T>;
using dataType = T;
static constexpr bool indexable = reduce::Add<T>::indexable;
};
template <typename T>
struct reduce_binary_operator<T, ReduceTensorOp_t::NORM1>
{
using opType = reduce::Add<T>;
using dataType = T;
static constexpr bool indexable = reduce::Add<T>::indexable;
};
template <typename T>
struct reduce_binary_operator<T, ReduceTensorOp_t::NORM2>
{
using opType = reduce::Add<T>;
using dataType = T;
static constexpr bool indexable = reduce::Add<T>::indexable;
};
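// Illustrative checks (not part of the original commit): the mapping lets upper-layer code
// query, at compile time, which functor implements a ReduceTensorOp_t and whether it can
// produce indices.
static_assert(reduce_binary_operator<float, ReduceTensorOp_t::MAX>::indexable,
              "MAX reduction supports index output");
static_assert(!reduce_binary_operator<float, ReduceTensorOp_t::ADD>::indexable,
              "ADD reduction has no meaningful index output");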
// The templated struct reduce_unary_operator maps the enum Ids of Reduce operators to two unary
// functor classes.
// The two unary functors are called before and after the Reduction is executed, respectively.
template <typename T, ReduceTensorOp_t op, bool isFirstReduce, bool isLastReduce>
struct reduce_unary_operator
{
using preUnaryOp = reduce::unary_identic<T, false>;
using posUnaryOp = reduce::unary_identic<T, false>;
};
template <typename T, bool isFirstReduce>
struct reduce_unary_operator<T, ReduceTensorOp_t::AVG, isFirstReduce, true>
{
using preUnaryOp = reduce::unary_identic<T, false>;
using posUnaryOp = reduce::unary_identic<T, true>;
};
template <typename T, bool isLastReduce>
struct reduce_unary_operator<T, ReduceTensorOp_t::NORM1, true, isLastReduce>
{
using preUnaryOp = reduce::unary_abs<T, false>;
using posUnaryOp = reduce::unary_identic<T, false>;
};
template <typename T, bool isLastReduce>
struct reduce_unary_operator<T, ReduceTensorOp_t::AMAX, true, isLastReduce>
{
using preUnaryOp = reduce::unary_abs<T, false>;
using posUnaryOp = reduce::unary_identic<T, false>;
};
template <typename T>
struct reduce_unary_operator<T, ReduceTensorOp_t::NORM2, true, false>
{
using preUnaryOp = reduce::unary_square<T, false>;
using posUnaryOp = reduce::unary_identic<T, false>;
};
template <typename T>
struct reduce_unary_operator<T, ReduceTensorOp_t::NORM2, true, true>
{
using preUnaryOp = reduce::unary_square<T, false>;
using posUnaryOp = reduce::unary_sqrt<T>;
};
template <typename T>
struct reduce_unary_operator<T, ReduceTensorOp_t::NORM2, false, true>
{
using preUnaryOp = reduce::unary_identic<T, false>;
using posUnaryOp = reduce::unary_sqrt<T>;
};
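// Illustrative example (not part of the original commit): for a single-pass NORM2 reduction
// (isFirstReduce && isLastReduce) the elements are squared before accumulation and the final
// accumulated value is square-rooted afterwards.
namespace doc_example {
using norm2_ops = reduce_unary_operator<float, ReduceTensorOp_t::NORM2, true, true>;
using norm2_pre = norm2_ops::preUnaryOp; // reduce::unary_square<float, false>
using norm2_pos = norm2_ops::posUnaryOp; // reduce::unary_sqrt<float>
} // namespace doc_example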
} // end of namespace ck
#endif
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "config.hpp"
#include "number.hpp"
#include "sequence.hpp"
#include "tensor_descriptor_helper.hpp"
#include "data_type_enum_helper.hpp"
#include "reduction_common.hpp"
#include "gridwise_generic_2d_reduction_blockwise.hpp"
using namespace ck;
using srcDataType =
typename get_datatype_from_enum<static_cast<DataTypeEnum_t>(CK_PARAM_SRC_DATATYPE)>::type;
using dstDataType =
typename get_datatype_from_enum<static_cast<DataTypeEnum_t>(CK_PARAM_DST_DATATYPE)>::type;
using compType =
typename get_datatype_from_enum<static_cast<DataTypeEnum_t>(CK_PARAM_REDUCE_COMPTYPE)>::type;
constexpr index_t BlockSize = CK_PARAM_BLOCKSIZE; // tunable
constexpr index_t srcDims = CK_PARAM_IN_DIMS;
constexpr ReduceTensorOp_t op = static_cast<ReduceTensorOp_t>(CK_PARAM_REDUCE_OP);
constexpr NanPropagation_t nanPropaOpt = CK_PARAM_NAN_PROPAGATE == 0
? NanPropagation_t::NOT_PROPAGATE_NAN
: NanPropagation_t::PROPAGATE_NAN;
constexpr ReduceTensorIndices_t reduceIndicesOpt = CK_PARAM_REDUCE_INDICES == 0
? ReduceTensorIndices_t::NO_INDICES
: ReduceTensorIndices_t::FLATTENED_INDICES;
constexpr bool src2d_need_padding = static_cast<bool>(CK_PARAM_SRC2D_PADDING);
constexpr bool dst1d_need_padding = static_cast<bool>(CK_PARAM_DST1D_PADDING);
constexpr bool indexable = reduce_binary_operator<compType, op>::indexable;
constexpr bool need_indices = indexable && (reduceIndicesOpt != ReduceTensorIndices_t::NO_INDICES);
constexpr index_t GredAccessesPerThreadInBlock = CK_PARAM_ACCESSES_PER_THREAD_INBLOCK; // tunable
// helper functions using variadic template arguments
template <index_t... Ns>
__device__ static auto make_tuple_from_array_and_index_seq(const int* lengths, Sequence<Ns...>)
{
return make_tuple(static_cast<index_t>(lengths[Ns])...);
};
template <index_t arraySize>
__device__ static auto make_tuple_from_array(const int* lengths, Number<arraySize>)
{
static_assert(arraySize >= 1 && arraySize <= 6, "The tensor should have 1 to 6 dimensions");
constexpr auto index_seq = typename arithmetic_sequence_gen<0, arraySize, 1>::type{};
return make_tuple_from_array_and_index_seq(lengths, index_seq);
};
template <index_t... Ns>
__device__ static constexpr auto make_tuple_from_seq(Sequence<Ns...>)
{
return make_tuple(Ns...);
};
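// For example (illustrative): with int lengths[3] = {4, 5, 6},
//   make_tuple_from_array(lengths, Number<3>{}) yields make_tuple(4, 5, 6), and
//   make_tuple_from_seq(Sequence<4, 5, 6>{}) yields the same tuple from compile-time values.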
extern "C" __global__ void gridwise_generic_reduce_1_prepare(int GridSize,
int BlkGroupSize,
int inLength0,
int inLength1,
int inLength2,
int inLength3,
int inLength4,
int inLength5,
int inStride0,
int inStride1,
int inStride2,
int inStride3,
int inStride4,
int inStride5,
void* __restrict__ ws_global)
{
(void)GridSize;
(void)BlkGroupSize;
void* p_src2dDesc = ws_global;
void* p_dst1dDesc = static_cast<char*>(ws_global) + 2048;
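// Workspace layout used by this prepare/compute kernel pair: the (possibly padded) 2-d source
// descriptor is written at offset 0 of ws_global and the 1-d destination descriptor at offset
// 2048, where the compute kernel reads them back.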
const int srcLengths[6] = {inLength0, inLength1, inLength2, inLength3, inLength4, inLength5};
const int srcStrides[6] = {inStride0, inStride1, inStride2, inStride3, inStride4, inStride5};
const auto tupleSrcLengths = make_tuple_from_array(srcLengths, Number<srcDims>{});
const auto tupleSrcStrides = make_tuple_from_array(srcStrides, Number<srcDims>{});
const auto tupleDstLengths = make_tuple(1);
const auto tupleDstStrides = make_tuple(1);
const auto srcDesc = make_naive_tensor_descriptor(tupleSrcLengths, tupleSrcStrides);
auto dstDesc = make_naive_tensor_descriptor(tupleDstLengths, tupleDstStrides);
const auto one_dim_srcDesc = transform_tensor_descriptor(
srcDesc,
make_tuple(make_merge_transform(tupleSrcLengths)),
make_tuple(typename arithmetic_sequence_gen<0, srcDims, 1>::type{}),
make_tuple(Sequence<0>{}));
auto src2dDesc = transform_tensor_descriptor(
one_dim_srcDesc,
make_tuple(make_unmerge_transform(make_tuple(1, one_dim_srcDesc.GetLength(Number<0>{})))),
make_tuple(Sequence<0>{}),
make_tuple(Sequence<0, 1>{}));
constexpr int invariantLen = 1;
const auto toReduceLen = src2dDesc.GetLength(Number<1>{});
constexpr auto copySliceLen = BlockSize * GredAccessesPerThreadInBlock;
if constexpr(src2d_need_padding)
{
const auto srcPad =
((toReduceLen + copySliceLen - 1) / copySliceLen) * copySliceLen - toReduceLen;
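// e.g. with BlockSize = 256 and GredAccessesPerThreadInBlock = 2, copySliceLen = 512, so a
// toReduceLen of 1000 is padded up to 1024 and srcPad = 24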
auto src2dDesc_2 =
transform_tensor_descriptor(src2dDesc,
make_tuple(make_pass_through_transform(invariantLen),
make_pad_transform(toReduceLen, 0, srcPad)),
make_tuple(Sequence<0>{}, Sequence<1>{}),
make_tuple(Sequence<0>{}, Sequence<1>{}));
if(get_thread_local_1d_id() == 0)
*static_cast<decltype(src2dDesc_2)*>(p_src2dDesc) = src2dDesc_2;
}
else
{
if(get_thread_local_1d_id() == 0)
*static_cast<decltype(src2dDesc)*>(p_src2dDesc) = src2dDesc;
}
if(get_thread_local_1d_id() == 0)
*static_cast<decltype(dstDesc)*>(p_dst1dDesc) = dstDesc;
};
template <index_t srcDims>
struct get_ref_desc_types
{
static constexpr auto ref_srcLengths = typename uniform_sequence_gen<srcDims, 8>::type{};
// don't have to use accurate strides to get the expected reference type
static constexpr auto ref_srcDesc = make_naive_tensor_descriptor(
make_tuple_from_seq(ref_srcLengths), make_tuple_from_seq(ref_srcLengths));
static constexpr auto ref_dstDesc = make_naive_tensor_descriptor(make_tuple(1), make_tuple(1));
static constexpr auto ref_one_dim_srcDesc = transform_tensor_descriptor(
ref_srcDesc,
make_tuple(make_merge_transform(make_tuple_from_seq(ref_srcLengths))),
make_tuple(typename arithmetic_sequence_gen<0, srcDims, 1>::type{}),
make_tuple(Sequence<0>{}));
static constexpr auto ref_src2dDesc =
transform_tensor_descriptor(ref_one_dim_srcDesc,
make_tuple(make_unmerge_transform(
make_tuple(1, ref_one_dim_srcDesc.GetLength(Number<0>{})))),
make_tuple(Sequence<0>{}),
make_tuple(Sequence<0, 1>{}));
static constexpr auto ref_invariantLen = ref_src2dDesc.GetLength(Number<0>{});
static constexpr auto ref_toReduceLen = ref_src2dDesc.GetLength(Number<1>{});
// used by the BlockWise and MultiBlock methods
using refType_src2dDesc_padded_34 = decltype(
transform_tensor_descriptor(ref_src2dDesc,
make_tuple(make_pass_through_transform(ref_invariantLen),
make_pad_transform(ref_toReduceLen, 0, 2)),
make_tuple(Sequence<0>{}, Sequence<1>{}),
make_tuple(Sequence<0>{}, Sequence<1>{})));
using refType_dst1dDesc_padded =
decltype(transform_tensor_descriptor(ref_dstDesc,
make_tuple(make_pad_transform(ref_invariantLen, 0, 2)),
make_tuple(Sequence<0>{}),
make_tuple(Sequence<0>{})));
using refType_src2dDesc = decltype(ref_src2dDesc);
using refType_dst1dDesc = decltype(ref_dstDesc);
};
using refType_src2dDesc = typename get_ref_desc_types<srcDims>::refType_src2dDesc;
using refType_dst1dDesc = typename get_ref_desc_types<srcDims>::refType_dst1dDesc;
using refType_src2dDesc_padded_34 =
typename get_ref_desc_types<srcDims>::refType_src2dDesc_padded_34;
using refType_dst1dDesc_padded = typename get_ref_desc_types<srcDims>::refType_dst1dDesc_padded;
template <bool need_padding>
static __device__ auto get_reduction_src2d_descriptor(const void* p_src2dDesc)
{
if constexpr(need_padding)
return (*reinterpret_cast<const refType_src2dDesc_padded_34*>(p_src2dDesc));
else
return (*reinterpret_cast<const refType_src2dDesc*>(p_src2dDesc));
};
template <bool need_padding>
static __device__ auto get_reduction_dst1d_descriptor(const void* p_dst1dDesc)
{
if constexpr(need_padding)
return (*reinterpret_cast<const refType_dst1dDesc_padded*>(p_dst1dDesc));
else
return (*reinterpret_cast<const refType_dst1dDesc*>(p_dst1dDesc));
};
extern "C" __global__ void gridwise_generic_reduce_1(int origReduceLen,
int BlkGroupSize,
float alpha,
const void* __restrict__ p_src_global,
float beta,
void* __restrict__ p_dst_global,
const void CONSTANT* ws_global,
long ws_buf2_bytes_offset,
void* __restrict__ indices_global)
{
(void)BlkGroupSize;
(void)ws_buf2_bytes_offset;
const void* p_src2dDesc = cast_pointer_to_generic_address_space(ws_global);
const void* p_dst1dDesc = static_cast<const char*>(p_src2dDesc) + 2048;
const auto src2dDesc = get_reduction_src2d_descriptor<src2d_need_padding>(p_src2dDesc);
const auto dst1dDesc = get_reduction_dst1d_descriptor<dst1d_need_padding>(p_dst1dDesc);
using gridwise_2d_reduce = GridwiseReduction_xy_to_x_blockwise<BlockSize,
srcDataType,
dstDataType,
compType,
decltype(src2dDesc),
decltype(dst1dDesc),
op,
nanPropaOpt,
reduceIndicesOpt,
true,
true,
GredAccessesPerThreadInBlock>;
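// RunId selects the kernel variant: 1 reduces values only, 2 additionally tracks and writes
// the index of the selected element (only used when the op is indexable and indices were
// requested).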
constexpr int RunId = need_indices ? 2 : 1;
gridwise_2d_reduce::template Run<RunId>(
src2dDesc,
dst1dDesc,
origReduceLen,
alpha,
static_cast<const srcDataType* const __restrict__>(p_src_global),
beta,
static_cast<dstDataType* const __restrict__>(p_dst_global),
static_cast<const int* const __restrict__>(nullptr),
static_cast<int* const __restrict__>(indices_global));
};
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "config.hpp"
#include "number.hpp"
#include "sequence.hpp"
#include "tensor_descriptor_helper.hpp"
#include "data_type_enum_helper.hpp"
#include "reduction_common.hpp"
#include "gridwise_generic_2d_reduction_blockwise.hpp"
using namespace ck;
using srcDataType =
typename get_datatype_from_enum<static_cast<DataTypeEnum_t>(CK_PARAM_SRC_DATATYPE)>::type;
using dstDataType =
typename get_datatype_from_enum<static_cast<DataTypeEnum_t>(CK_PARAM_DST_DATATYPE)>::type;
using compType =
typename get_datatype_from_enum<static_cast<DataTypeEnum_t>(CK_PARAM_REDUCE_COMPTYPE)>::type;
constexpr index_t BlockSize = CK_PARAM_BLOCKSIZE; // tunable
constexpr index_t srcDims = CK_PARAM_IN_DIMS;
constexpr index_t dstDims = CK_PARAM_OUT_DIMS;
constexpr index_t num_toReduceDims = CK_PARAM_NUM_TOREDUCE_DIMS;
constexpr index_t num_invariantDims = srcDims - num_toReduceDims;
using invariantDims = typename arithmetic_sequence_gen<0, num_invariantDims, 1>::type;
using toReduceDims = typename arithmetic_sequence_gen<num_invariantDims, srcDims, 1>::type;
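// e.g. for srcDims = 4 and num_toReduceDims = 2: invariantDims = Sequence<0, 1> and
// toReduceDims = Sequence<2, 3>, i.e. this kernel assumes the reduced dimensions come last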
constexpr ReduceTensorOp_t op = static_cast<ReduceTensorOp_t>(CK_PARAM_REDUCE_OP);
constexpr NanPropagation_t nanPropaOpt = CK_PARAM_NAN_PROPAGATE == 0
? NanPropagation_t::NOT_PROPAGATE_NAN
: NanPropagation_t::PROPAGATE_NAN;
constexpr ReduceTensorIndices_t reduceIndicesOpt = CK_PARAM_REDUCE_INDICES == 0
? ReduceTensorIndices_t::NO_INDICES
: ReduceTensorIndices_t::FLATTENED_INDICES;
constexpr bool src2d_need_padding = static_cast<bool>(CK_PARAM_SRC2D_PADDING);
constexpr bool dst1d_need_padding = static_cast<bool>(CK_PARAM_DST1D_PADDING);
static_assert(num_invariantDims > 0, "Not all dimensions can be reduced by this kernel!");
constexpr bool indexable = reduce_binary_operator<compType, op>::indexable;
constexpr bool need_indices = indexable && (reduceIndicesOpt != ReduceTensorIndices_t::NO_INDICES);
constexpr index_t GredAccessesPerThreadInBlock = CK_PARAM_ACCESSES_PER_THREAD_INBLOCK; // tunable
// helper functions using variadic template arguments
template <index_t... Ns>
__device__ static auto make_tuple_from_array_and_index_seq(const int* lengths, Sequence<Ns...>)
{
return make_tuple(static_cast<index_t>(lengths[Ns])...);
};
template <index_t arraySize>
__device__ static auto make_tuple_from_array(const int* lengths, Number<arraySize>)
{
static_assert(arraySize >= 1 && arraySize <= 6, "The tensor should have 1 to 6 dimensions");
constexpr auto index_seq = typename arithmetic_sequence_gen<0, arraySize, 1>::type{};
return make_tuple_from_array_and_index_seq(lengths, index_seq);
};
template <index_t... Ns>
__device__ static constexpr auto make_tuple_from_seq(Sequence<Ns...>)
{
return make_tuple(Ns...);
};
extern "C" __global__ void gridwise_generic_reduce_1_prepare(int GridSize,
int BlkGroupSize,
int inLength0,
int inLength1,
int inLength2,
int inLength3,
int inLength4,
int inLength5,
int inStride0,
int inStride1,
int inStride2,
int inStride3,
int inStride4,
int inStride5,
int outStride0,
int outStride1,
int outStride2,
int outStride3,
int outStride4,
int outStride5,
void* __restrict__ ws_global)
{
(void)GridSize;
(void)BlkGroupSize;
void* p_src2dDesc = ws_global;
void* p_dst1dDesc = static_cast<char*>(ws_global) + 2048;
const int srcLengths[6] = {inLength0, inLength1, inLength2, inLength3, inLength4, inLength5};
const int srcStrides[6] = {inStride0, inStride1, inStride2, inStride3, inStride4, inStride5};
const int dstStrides[6] = {
outStride0, outStride1, outStride2, outStride3, outStride4, outStride5};
const auto tupleSrcLengths = make_tuple_from_array(srcLengths, Number<srcDims>{});
const auto tupleSrcStrides = make_tuple_from_array(srcStrides, Number<srcDims>{});
const auto tupleDstLengths = make_tuple_from_array(srcLengths, Number<dstDims>{});
const auto tupleDstStrides = make_tuple_from_array(dstStrides, Number<dstDims>{});
const auto srcDesc = make_naive_tensor_descriptor(tupleSrcLengths, tupleSrcStrides);
const auto dstDesc = make_naive_tensor_descriptor(tupleDstLengths, tupleDstStrides);
const auto toReduceDimLengths = make_tuple_from_array_and_index_seq(srcLengths, toReduceDims{});
const auto invariantDimLengths =
make_tuple_from_array_and_index_seq(srcLengths, invariantDims{});
auto src2dDesc =
transform_tensor_descriptor(srcDesc,
make_tuple(make_merge_transform(invariantDimLengths),
make_merge_transform(toReduceDimLengths)),
make_tuple(invariantDims{}, toReduceDims{}),
make_tuple(Sequence<0>{}, Sequence<1>{}));
auto dst1dDesc = transform_tensor_descriptor(
dstDesc,
make_tuple(make_merge_transform(tupleDstLengths)),
make_tuple(typename arithmetic_sequence_gen<0, dstDims, 1>::type{}),
make_tuple(Sequence<0>{}));
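// e.g. reducing an NCHW tensor over H and W: src2dDesc becomes an (N*C, H*W) matrix and
// dst1dDesc a vector of length N*C, so the problem maps onto the generic 2-d blockwise reduction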
const auto invariantLen = src2dDesc.GetLength(Number<0>{});
const auto toReduceLen = src2dDesc.GetLength(Number<1>{});
constexpr auto copySliceLen = BlockSize * GredAccessesPerThreadInBlock;
if constexpr(src2d_need_padding)
{
const auto srcPad =
((toReduceLen + copySliceLen - 1) / copySliceLen) * copySliceLen - toReduceLen;
auto src2dDesc_2 =
transform_tensor_descriptor(src2dDesc,
make_tuple(make_pass_through_transform(invariantLen),
make_pad_transform(toReduceLen, 0, srcPad)),
make_tuple(Sequence<0>{}, Sequence<1>{}),
make_tuple(Sequence<0>{}, Sequence<1>{}));
if(get_thread_local_1d_id() == 0)
*static_cast<decltype(src2dDesc_2)*>(p_src2dDesc) = src2dDesc_2;
}
else
{
if(get_thread_local_1d_id() == 0)
*static_cast<decltype(src2dDesc)*>(p_src2dDesc) = src2dDesc;
}
if(get_thread_local_1d_id() == 0)
*static_cast<decltype(dst1dDesc)*>(p_dst1dDesc) = dst1dDesc;
};
template <index_t srcDims, index_t dstDims, typename invariantDims, typename toReduceDims>
struct get_ref_desc_types
{
static constexpr auto ref_toReduceDimLengths =
typename uniform_sequence_gen<toReduceDims::Size(), 8>::type{};
static constexpr auto ref_invariantDimLengths =
typename uniform_sequence_gen<invariantDims::Size(), 8>::type{};
static constexpr auto ref_srcLengths = typename uniform_sequence_gen<srcDims, 8>::type{};
static constexpr auto ref_dstLengths = typename uniform_sequence_gen<dstDims, 8>::type{};
// don't have to use accurate strides to get the expected reference type
static constexpr auto ref_srcDesc = make_naive_tensor_descriptor(
make_tuple_from_seq(ref_srcLengths), make_tuple_from_seq(ref_srcLengths));
static constexpr auto ref_dstDesc = make_naive_tensor_descriptor(
make_tuple_from_seq(ref_dstLengths), make_tuple_from_seq(ref_dstLengths));
static constexpr auto ref_src2dDesc = transform_tensor_descriptor(
ref_srcDesc,
make_tuple(make_merge_transform(make_tuple_from_seq(ref_invariantDimLengths)),
make_merge_transform(make_tuple_from_seq(ref_toReduceDimLengths))),
make_tuple(invariantDims{}, toReduceDims{}),
make_tuple(Sequence<0>{}, Sequence<1>{}));
static constexpr auto ref_dst1dDesc = transform_tensor_descriptor(
ref_dstDesc,
make_tuple(make_merge_transform(make_tuple_from_seq(ref_dstLengths))),
make_tuple(typename arithmetic_sequence_gen<0, dstDims, 1>::type{}),
make_tuple(Sequence<0>{}));
static constexpr auto ref_invariantLen = ref_src2dDesc.GetLength(Number<0>{});
static constexpr auto ref_toReduceLen = ref_src2dDesc.GetLength(Number<1>{});
// used by the BlockWise and MultiBlock methods
using refType_src2dDesc_padded_34 = decltype(
transform_tensor_descriptor(ref_src2dDesc,
make_tuple(make_pass_through_transform(ref_invariantLen),
make_pad_transform(ref_toReduceLen, 0, 2)),
make_tuple(Sequence<0>{}, Sequence<1>{}),
make_tuple(Sequence<0>{}, Sequence<1>{})));
using refType_dst1dDesc_padded =
decltype(transform_tensor_descriptor(ref_dst1dDesc,
make_tuple(make_pad_transform(ref_invariantLen, 0, 2)),
make_tuple(Sequence<0>{}),
make_tuple(Sequence<0>{})));
using refType_src2dDesc = decltype(ref_src2dDesc);
using refType_dst1dDesc = decltype(ref_dst1dDesc);
};
using refType_src2dDesc =
typename get_ref_desc_types<srcDims, dstDims, invariantDims, toReduceDims>::refType_src2dDesc;
using refType_dst1dDesc =
typename get_ref_desc_types<srcDims, dstDims, invariantDims, toReduceDims>::refType_dst1dDesc;
using refType_src2dDesc_padded_34 =
typename get_ref_desc_types<srcDims, dstDims, invariantDims, toReduceDims>::
refType_src2dDesc_padded_34;
using refType_dst1dDesc_padded =
typename get_ref_desc_types<srcDims, dstDims, invariantDims, toReduceDims>::
refType_dst1dDesc_padded;
template <bool need_padding>
static __device__ auto get_reduction_src2d_descriptor(const void* p_src2dDesc)
{
if constexpr(need_padding)
return (*reinterpret_cast<const refType_src2dDesc_padded_34*>(p_src2dDesc));
else
return (*reinterpret_cast<const refType_src2dDesc*>(p_src2dDesc));
};
template <bool need_padding>
static __device__ auto get_reduction_dst1d_descriptor(const void* p_dst1dDesc)
{
if constexpr(need_padding)
return (*reinterpret_cast<const refType_dst1dDesc_padded*>(p_dst1dDesc));
else
return (*reinterpret_cast<const refType_dst1dDesc*>(p_dst1dDesc));
};
extern "C" __global__ void gridwise_generic_reduce_1(int origReduceLen,
int BlkGroupSize,
float alpha,
const void* __restrict__ p_src_global,
float beta,
void* __restrict__ p_dst_global,
const void CONSTANT* ws_global,
long ws_buf2_bytes_offset,
void* __restrict__ indices_global)
{
(void)BlkGroupSize;
(void)ws_buf2_bytes_offset;
const void* p_src2dDesc = cast_pointer_to_generic_address_space(ws_global);
const void* p_dst1dDesc = static_cast<const char*>(p_src2dDesc) + 2048;
const auto src2dDesc = get_reduction_src2d_descriptor<src2d_need_padding>(p_src2dDesc);
const auto dst1dDesc = get_reduction_dst1d_descriptor<dst1d_need_padding>(p_dst1dDesc);
using gridwise_2d_reduce = GridwiseReduction_xy_to_x_blockwise<BlockSize,
srcDataType,
dstDataType,
compType,
decltype(src2dDesc),
decltype(dst1dDesc),
op,
nanPropaOpt,
reduceIndicesOpt,
true,
true,
GredAccessesPerThreadInBlock>;
constexpr int RunId = need_indices ? 2 : 1;
gridwise_2d_reduce::template Run<RunId>(
src2dDesc,
dst1dDesc,
origReduceLen,
alpha,
static_cast<const srcDataType* const __restrict__>(p_src_global),
beta,
static_cast<dstDataType* const __restrict__>(p_dst_global),
static_cast<const int* const __restrict__>(nullptr),
static_cast<int* const __restrict__>(indices_global));
};