Unverified Commit 1f543bfa authored by Qianfeng, committed by GitHub

Regulate reduction accumulator operations and Element-wise operations (#274)

* Remove template from Reduction operation classes and add template to their operator() and GetIdentityValue() interfaces

* Change the unary element-wise operators and the reduce_unary_operator (class for mapping), and dependent variations in all host layers

* Remove the data type template parameter from reduce_binary_operator (class for mapping) and dependent variations in host layers

* Add InMemoryDataOperatonSupportedOnDataType to check the matching between data type and InMemoryDataOperation

* Use struct-scope operator template instantiation for binary and unary element-wise operations

* Change a few more elementwise operations to use template for operator()

* Tiny correction in Normalize operator

* Add static_assert to check the data type applicability for some reduction accumulator and element-wise operations

* Correction in some examples with regard to using ReduceAccDataType

* Use static_assert for UnaryDivide

* Update merged code to use Element-wise operations and Reduction Accumulator operations correctly

* Tiny fix with regard to SetWorkSpacePointer()
parent 63cdd923
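
For the refactoring described above, here is a minimal host-only sketch (plain C++17, no __host__ __device__ qualifiers; Add and Max are local stand-ins shaped like ck::reduce::Add / ck::reduce::Max, not the library types) of the convention the commit moves to: the accumulator operation class carries no data-type template, and operator() / GetIdentityValue() are templated per call instead.

```cpp
#include <iostream>
#include <limits>

namespace sketch {

// Host-only stand-ins: the operation class has no data-type template;
// the accumulator type is supplied to operator() / GetIdentityValue() per call.
struct Add
{
    template <typename T>
    static constexpr T GetIdentityValue() { return static_cast<T>(0); }

    template <typename T>
    constexpr void operator()(T& acc, T v) const { acc += v; }
};

struct Max
{
    template <typename T>
    static constexpr T GetIdentityValue() { return std::numeric_limits<T>::lowest(); }

    template <typename T>
    constexpr void operator()(T& acc, T v) const { if(acc < v) acc = v; }
};

} // namespace sketch

int main()
{
    const float data[4] = {1.5f, -2.0f, 4.25f, 0.5f};

    sketch::Add add_op;
    float sum = sketch::Add::GetIdentityValue<float>();
    for(float v : data) add_op(sum, v);

    sketch::Max max_op;
    float mx = sketch::Max::GetIdentityValue<float>();
    for(float v : data) max_op(mx, v);

    std::cout << "sum=" << sum << " max=" << mx << "\n"; // sum=4.25 max=4.25
    return 0;
}
```

With this shape, declarations such as ck::Tuple<ck::reduce::Max> in the diff below no longer carry a ReduceAccDataType argument; the accumulator type is chosen at each call site.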
...@@ -33,11 +33,11 @@ constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::NORM2; ...@@ -33,11 +33,11 @@ constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::NORM2;
constexpr bool PropagateNan = true; constexpr bool PropagateNan = true;
constexpr bool OutputIndex = false; constexpr bool OutputIndex = false;
using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType; using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;
using InElementwiseOperation = using InElementwiseOperation =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation; typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
using AccElementwiseOperation = using AccElementwiseOperation =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation; typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;
using DeviceReduceInstance = DeviceReduceMultiBlock<InDataType, using DeviceReduceInstance = DeviceReduceMultiBlock<InDataType,
AccDataType, AccDataType,
...@@ -247,6 +247,13 @@ int main(int argc, char* argv[]) ...@@ -247,6 +247,13 @@ int main(int argc, char* argv[])
DeviceMem out_index_dev(indicesSizeInBytes); DeviceMem out_index_dev(indicesSizeInBytes);
InElementwiseOperation in_elementwise_op;
AccElementwiseOperation acc_elementwise_op;
std::tie(in_elementwise_op, acc_elementwise_op) =
reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(
static_cast<int32_t>(reduce_total_length));
if(args.do_verification) if(args.do_verification)
{ {
ReductionHost<InDataType, ReductionHost<InDataType,
...@@ -261,8 +268,13 @@ int main(int argc, char* argv[]) ...@@ -261,8 +268,13 @@ int main(int argc, char* argv[])
OutputIndex> OutputIndex>
hostReduce(in.mDesc, out_ref.mDesc, invariantDims, reduceDims); hostReduce(in.mDesc, out_ref.mDesc, invariantDims, reduceDims);
hostReduce.Run( hostReduce.Run(alpha,
alpha, in.mData.data(), beta, out_ref.mData.data(), out_indices_ref.mData.data()); in.mData.data(),
beta,
out_ref.mData.data(),
out_indices_ref.mData.data(),
in_elementwise_op,
acc_elementwise_op);
}; };
std::vector<ck::index_t> i_inLengths; std::vector<ck::index_t> i_inLengths;
...@@ -277,20 +289,19 @@ int main(int argc, char* argv[]) ...@@ -277,20 +289,19 @@ int main(int argc, char* argv[])
auto reduce = DeviceReduceInstance{}; auto reduce = DeviceReduceInstance{};
auto argument_ptr = reduce.MakeArgumentPointer( auto argument_ptr = reduce.MakeArgumentPointer(i_inLengths,
i_inLengths, i_inStrides,
i_inStrides, i_outLengths,
i_outLengths, i_outStrides,
i_outStrides, reduceDims,
reduceDims, alpha,
alpha, beta,
beta, in_dev.GetDeviceBuffer(),
in_dev.GetDeviceBuffer(), nullptr,
nullptr, out_dev.GetDeviceBuffer(),
out_dev.GetDeviceBuffer(), out_index_dev.GetDeviceBuffer(),
out_index_dev.GetDeviceBuffer(), in_elementwise_op,
InElementwiseOperation{static_cast<int32_t>(reduce_total_length)}, acc_elementwise_op);
AccElementwiseOperation{static_cast<int32_t>(reduce_total_length)});
if(!reduce.IsSupportedArgument(argument_ptr.get())) if(!reduce.IsSupportedArgument(argument_ptr.get()))
{ {
......
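
A reduced host-only sketch of the calling convention used in the example above: for NORM2 the mapping yields a square/sqrt pair, and the caller now builds both element-wise functors once and passes them into the reference reduction (as the updated hostReduce.Run() signature does) instead of the reference constructing them from a divider. The functor names below are local stand-ins, not the ck:: types.

```cpp
#include <cmath>
#include <iostream>
#include <vector>

// Local stand-in functors (host-only) with the same call shape as the ck:: ones.
struct UnarySquare { void operator()(float& y, float x) const { y = x * x; } };
struct UnarySqrt   { void operator()(float& y, float x) const { y = std::sqrt(x); } };

// Reference reduction that receives its element-wise functors as arguments.
template <typename InOp, typename AccOp>
float host_reduce_norm2(const std::vector<float>& in, InOp in_op, AccOp acc_op)
{
    float acc = 0.0f; // identity of reduce::Add
    for(float x : in)
    {
        float t = 0.0f;
        in_op(t, x); // pre-reduction element-wise op (square for NORM2)
        acc += t;
    }
    float out = 0.0f;
    acc_op(out, acc); // post-reduction element-wise op (sqrt for NORM2)
    return out;
}

int main()
{
    const std::vector<float> in{3.0f, 4.0f};
    std::cout << host_reduce_norm2(in, UnarySquare{}, UnarySqrt{}) << "\n"; // 5
    return 0;
}
```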
...@@ -31,13 +31,13 @@ constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::NORM2; ...@@ -31,13 +31,13 @@ constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::NORM2;
constexpr bool PropagateNan = true; constexpr bool PropagateNan = true;
constexpr bool OutputIndex = false; constexpr bool OutputIndex = false;
using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType; using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;
using InElementwiseOperation = using InElementwiseOperation =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation; typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
using AccElementwiseOperation = using AccElementwiseOperation =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation; typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;
using PassThroughOp = tensor_operation::element_wise::UnaryIdentic<AccDataType, AccDataType>; using PassThroughOp = tensor_operation::element_wise::PassThrough;
using DeviceReduceInstance_1 = DeviceReduceMultiBlock<InOutDataType, using DeviceReduceInstance_1 = DeviceReduceMultiBlock<InOutDataType,
AccDataType, AccDataType,
...@@ -184,6 +184,13 @@ int main(int argc, char* argv[]) ...@@ -184,6 +184,13 @@ int main(int argc, char* argv[])
if(beta != 0.0f) if(beta != 0.0f)
out_dev.ToDevice(out.mData.data()); out_dev.ToDevice(out.mData.data());
InElementwiseOperation in_elementwise_op;
AccElementwiseOperation acc_elementwise_op;
std::tie(in_elementwise_op, acc_elementwise_op) =
reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(
static_cast<int32_t>(reduce_total_length));
if(do_verify) if(do_verify)
{ {
ReductionHost<InOutDataType, ReductionHost<InOutDataType,
...@@ -198,7 +205,13 @@ int main(int argc, char* argv[]) ...@@ -198,7 +205,13 @@ int main(int argc, char* argv[])
OutputIndex> OutputIndex>
hostReduce(in_1.mDesc, out_ref.mDesc, invariantDims, reduceDims); hostReduce(in_1.mDesc, out_ref.mDesc, invariantDims, reduceDims);
hostReduce.Run(alpha, in_1.mData.data(), beta, out_ref.mData.data(), nullptr); hostReduce.Run(alpha,
in_1.mData.data(),
beta,
out_ref.mData.data(),
nullptr,
in_elementwise_op,
acc_elementwise_op);
}; };
std::vector<ck::index_t> i_inLengths_1; std::vector<ck::index_t> i_inLengths_1;
...@@ -217,20 +230,19 @@ int main(int argc, char* argv[]) ...@@ -217,20 +230,19 @@ int main(int argc, char* argv[])
auto reduce_1 = DeviceReduceInstance_1{}; auto reduce_1 = DeviceReduceInstance_1{};
auto argument_ptr_1 = reduce_1.MakeArgumentPointer( auto argument_ptr_1 = reduce_1.MakeArgumentPointer(i_inLengths_1,
i_inLengths_1, i_inStrides_1,
i_inStrides_1, i_inLengths_2,
i_inLengths_2, i_inStrides_2,
i_inStrides_2, reduceDims_1,
reduceDims_1, 1.0f,
1.0f, 0.0f,
0.0f, in_1_dev.GetDeviceBuffer(),
in_1_dev.GetDeviceBuffer(), nullptr,
nullptr, in_2_dev.GetDeviceBuffer(),
in_2_dev.GetDeviceBuffer(), nullptr,
nullptr, in_elementwise_op,
InElementwiseOperation{static_cast<int32_t>(reduce_total_length)}, PassThroughOp{});
PassThroughOp{});
if(!reduce_1.IsSupportedArgument(argument_ptr_1.get())) if(!reduce_1.IsSupportedArgument(argument_ptr_1.get()))
{ {
...@@ -243,20 +255,19 @@ int main(int argc, char* argv[]) ...@@ -243,20 +255,19 @@ int main(int argc, char* argv[])
auto reduce_2 = DeviceReduceInstance_2{}; auto reduce_2 = DeviceReduceInstance_2{};
auto argument_ptr_2 = reduce_2.MakeArgumentPointer( auto argument_ptr_2 = reduce_2.MakeArgumentPointer(i_inLengths_2,
i_inLengths_2, i_inStrides_2,
i_inStrides_2, i_outLengths,
i_outLengths, i_outStrides,
i_outStrides, reduceDims_2,
reduceDims_2, alpha,
alpha, beta,
beta, in_2_dev.GetDeviceBuffer(),
in_2_dev.GetDeviceBuffer(), nullptr,
nullptr, out_dev.GetDeviceBuffer(),
out_dev.GetDeviceBuffer(), nullptr,
nullptr, PassThroughOp{},
PassThroughOp{}, acc_elementwise_op);
AccElementwiseOperation{static_cast<int32_t>(reduce_total_length)});
if(!reduce_2.IsSupportedArgument(argument_ptr_2.get())) if(!reduce_2.IsSupportedArgument(argument_ptr_2.get()))
{ {
......
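
The two-stage variant above splits the element-wise work: in_elementwise_op runs only in the first reduction and acc_elementwise_op only in the second, with PassThroughOp in between. A small host-only sketch of that composition, with local stand-in functors:

```cpp
#include <cmath>
#include <iostream>
#include <vector>

// Local stand-ins with the same call shape as the ck:: functors.
struct PassThrough { void operator()(float& y, float x) const { y = x; } };
struct UnarySquare { void operator()(float& y, float x) const { y = x * x; } };
struct UnarySqrt   { void operator()(float& y, float x) const { y = std::sqrt(x); } };

// One reduction stage: apply in_op per element, accumulate with Add, apply out_op.
template <typename InOp, typename OutOp>
std::vector<float> reduce_rows(const std::vector<std::vector<float>>& rows,
                               InOp in_op, OutOp out_op)
{
    std::vector<float> out;
    for(const auto& row : rows)
    {
        float acc = 0.0f;
        for(float x : row)
        {
            float t = 0.0f;
            in_op(t, x);
            acc += t;
        }
        float y = 0.0f;
        out_op(y, acc);
        out.push_back(y);
    }
    return out;
}

int main()
{
    // Stage 1 (reduce_1): square on input, PassThrough on output.
    const std::vector<std::vector<float>> stage1_in{{1.0f, 2.0f}, {2.0f, 4.0f}};
    const auto partial = reduce_rows(stage1_in, UnarySquare{}, PassThrough{}); // {5, 20}

    // Stage 2 (reduce_2): PassThrough on input, sqrt on output.
    const auto result = reduce_rows({{partial[0], partial[1]}}, PassThrough{}, UnarySqrt{});
    std::cout << result[0] << "\n"; // sqrt(1 + 4 + 4 + 16) = 5
    return 0;
}
```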
...@@ -31,16 +31,15 @@ static void pool_host_verify(const Tensor<InDataType>& in, ...@@ -31,16 +31,15 @@ static void pool_host_verify(const Tensor<InDataType>& in,
const std::array<ck::index_t, 2>& in_left_pads, const std::array<ck::index_t, 2>& in_left_pads,
const std::array<ck::index_t, 2>& /*in_right_pads*/) const std::array<ck::index_t, 2>& /*in_right_pads*/)
{ {
const int32_t divider = window_spatial_lengths[0] * window_spatial_lengths[1]; const int32_t reduceLength = window_spatial_lengths[0] * window_spatial_lengths[1];
using ReduceOperation = typename ck::reduce_binary_operator<AccDataType, ReduceOpId>::opType; using ReduceOperation = typename ck::reduce_binary_operator<ReduceOpId>::opType;
using InElementwiseOperation = typename ck::
reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
using AccElementwiseOperation = typename ck::
reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation;
const InElementwiseOperation in_elementwise_op(divider); auto elementwise_ops =
const AccElementwiseOperation acc_elementwise_op(divider); ck::reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(reduceLength);
auto in_elementwise_op = std::get<0>(elementwise_ops);
auto acc_elementwise_op = std::get<1>(elementwise_ops);
if constexpr(!OutputIndex) if constexpr(!OutputIndex)
{ {
...@@ -48,7 +47,7 @@ static void pool_host_verify(const Tensor<InDataType>& in, ...@@ -48,7 +47,7 @@ static void pool_host_verify(const Tensor<InDataType>& in,
ck::detail::AccumulateWithNanCheck<PropagateNan, ReduceOperation, AccDataType>; ck::detail::AccumulateWithNanCheck<PropagateNan, ReduceOperation, AccDataType>;
auto f_nchw = [&](auto n, auto c, auto ho, auto wo) { auto f_nchw = [&](auto n, auto c, auto ho, auto wo) {
auto accuVal = ReduceOperation::GetIdentityValue(); auto accuVal = ReduceOperation::template GetIdentityValue<AccDataType>();
for(ck::index_t y = 0; y < window_spatial_lengths[0]; ++y) for(ck::index_t y = 0; y < window_spatial_lengths[0]; ++y)
{ {
...@@ -86,7 +85,7 @@ static void pool_host_verify(const Tensor<InDataType>& in, ...@@ -86,7 +85,7 @@ static void pool_host_verify(const Tensor<InDataType>& in,
AccDataType, AccDataType,
IndexDataType>; IndexDataType>;
auto f_nchw = [&](auto n, auto c, auto ho, auto wo) { auto f_nchw = [&](auto n, auto c, auto ho, auto wo) {
auto accuVal = ReduceOperation::GetIdentityValue(); auto accuVal = ReduceOperation::template GetIdentityValue<AccDataType>();
IndexDataType accuIndex = 0; IndexDataType accuIndex = 0;
for(ck::index_t y = 0; y < window_spatial_lengths[0]; ++y) for(ck::index_t y = 0; y < window_spatial_lengths[0]; ++y)
......
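
A host-only sketch of the pooling path above for ReduceTensorOp::AVG, where GetElementwiseOperator is assumed to return a (PassThrough, UnaryDivide{reduceLength}) pair; the functor types here are simplified stand-ins for the ck:: ones.

```cpp
#include <cstdint>
#include <iostream>
#include <tuple>

struct PassThrough { void operator()(float& y, float x) const { y = x; } };
struct UnaryDivide
{
    explicit UnaryDivide(int32_t d) : divider_(d) {}
    void operator()(float& y, float x) const { y = x / static_cast<float>(divider_); }
    int32_t divider_;
};

// Stand-in for reduce_unary_operator<ReduceTensorOp::AVG, true, true>::GetElementwiseOperator.
static std::tuple<PassThrough, UnaryDivide> GetElementwiseOperator(int32_t reduceLength)
{
    return std::make_tuple(PassThrough{}, UnaryDivide{reduceLength});
}

int main()
{
    const int32_t reduceLength = 2 * 2; // window_spatial_lengths[0] * window_spatial_lengths[1]

    auto elementwise_ops    = GetElementwiseOperator(reduceLength);
    auto in_elementwise_op  = std::get<0>(elementwise_ops);
    auto acc_elementwise_op = std::get<1>(elementwise_ops);

    // Average over one 2x2 window.
    const float window[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    float acc = 0.0f; // identity of reduce::Add
    for(float v : window)
    {
        float t = 0.0f;
        in_elementwise_op(t, v);
        acc += t;
    }
    float avg = 0.0f;
    acc_elementwise_op(avg, acc);
    std::cout << avg << "\n"; // 2.5
    return 0;
}
```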
...@@ -41,9 +41,8 @@ using CLayout = ck::tensor_layout::gemm::RowMajor; ...@@ -41,9 +41,8 @@ using CLayout = ck::tensor_layout::gemm::RowMajor;
using AElementOp = ck::tensor_operation::element_wise::PassThrough; using AElementOp = ck::tensor_operation::element_wise::PassThrough;
using BElementOp = ck::tensor_operation::element_wise::PassThrough; using BElementOp = ck::tensor_operation::element_wise::PassThrough;
using CElementOp = ck::tensor_operation::element_wise::PassThrough; using CElementOp = ck::tensor_operation::element_wise::PassThrough;
using DsReduceOp = ck::Tuple<ck::reduce::Max<ReduceAccDataType>>; using DsReduceOp = ck::Tuple<ck::reduce::Max>;
using DsElementOp = ck::Tuple< using DsElementOp = ck::Tuple<ck::tensor_operation::element_wise::PassThrough>;
ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, false>>;
using DGlobalMemOp = using DGlobalMemOp =
ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicMax>; ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicMax>;
...@@ -236,10 +235,14 @@ int main(int argc, char* argv[]) ...@@ -236,10 +235,14 @@ int main(int argc, char* argv[])
for(int m = 0; m < M; ++m) for(int m = 0; m < M; ++m)
{ {
ReduceAccDataType d_acc = d_reduce_op.GetIdentityValue(); ReduceAccDataType d_acc = d_reduce_op.GetIdentityValue<ReduceAccDataType>();
for(int n = 0; n < N; ++n) for(int n = 0; n < N; ++n)
d_reduce_op(d_acc, c_m_n_host_result(m, n)); {
ReduceAccDataType curr_val =
ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n));
d_reduce_op(d_acc, curr_val);
};
d_m_host_result(m) = d_acc; d_m_host_result(m) = d_acc;
} }
......
...@@ -41,18 +41,15 @@ using CLayout = ck::tensor_layout::gemm::RowMajor; ...@@ -41,18 +41,15 @@ using CLayout = ck::tensor_layout::gemm::RowMajor;
using AElementOp = ck::tensor_operation::element_wise::PassThrough; using AElementOp = ck::tensor_operation::element_wise::PassThrough;
using BElementOp = ck::tensor_operation::element_wise::PassThrough; using BElementOp = ck::tensor_operation::element_wise::PassThrough;
using CElementOp = ck::tensor_operation::element_wise::PassThrough; using CElementOp = ck::tensor_operation::element_wise::PassThrough;
using D0ReduceOp = ck::reduce::Add<ReduceAccDataType>; using D0ReduceOp = ck::reduce::Add;
using D1ReduceOp = ck::reduce::Add<ReduceAccDataType>; using D1ReduceOp = ck::reduce::Add;
using DxsReduceOp = ck::Tuple<D0ReduceOp, D1ReduceOp>; using DxsReduceOp = ck::Tuple<D0ReduceOp, D1ReduceOp>;
using UnaryIdenticElementOp = using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, false>; using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryDivide;
using UnaryDivElementOp = using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, true>; using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
using UnarySquareElementOp = using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
ck::tensor_operation::element_wise::UnarySquare<ReduceAccDataType, ReduceAccDataType, false>;
using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
using DGlobalMemOp = using DGlobalMemOp =
ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd, ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd,
...@@ -261,15 +258,14 @@ int main(int argc, char* argv[]) ...@@ -261,15 +258,14 @@ int main(int argc, char* argv[])
for(int m = 0; m < M; ++m) for(int m = 0; m < M; ++m)
{ {
ReduceAccDataType d0_acc = d0_reduce_op.GetIdentityValue(); auto d0_acc = d0_reduce_op.GetIdentityValue<ReduceAccDataType>();
ReduceAccDataType d1_acc = d1_reduce_op.GetIdentityValue(); auto d1_acc = d1_reduce_op.GetIdentityValue<ReduceAccDataType>();
for(int n = 0; n < N; ++n) for(int n = 0; n < N; ++n)
{ {
ReduceAccDataType c_val = auto c_val = ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n));
ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n)); ReduceAccDataType d0_val;
ReduceAccDataType d0_val = 0; ReduceAccDataType d1_val;
ReduceAccDataType d1_val = 0;
dxs_in_element_op(ck::Number<0>{})(d0_val, c_val); dxs_in_element_op(ck::Number<0>{})(d0_val, c_val);
dxs_in_element_op(ck::Number<1>{})(d1_val, c_val); dxs_in_element_op(ck::Number<1>{})(d1_val, c_val);
......
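
A host sketch of the D0/D1 reduction pair in the verification loop above: D0 accumulates the c values directly and D1 accumulates their squares (UnarySquare as the in-op), and both are divided by N afterwards to match the UnaryDivide out-ops in DxsOutElementOps, giving the per-row mean and mean of squares. The functor names below are local stand-ins.

```cpp
#include <iostream>
#include <vector>

struct PassThrough { void operator()(float& y, float x) const { y = x; } };
struct UnarySquare { void operator()(float& y, float x) const { y = x * x; } };
struct UnaryDivide
{
    explicit UnaryDivide(int d) : divider_(d) {}
    void operator()(float& y, float x) const { y = x / static_cast<float>(divider_); }
    int divider_;
};

int main()
{
    const std::vector<float> c_row{1.0f, 2.0f, 3.0f, 4.0f};
    const int N = static_cast<int>(c_row.size());

    float d0_acc = 0.0f, d1_acc = 0.0f; // identity of reduce::Add
    for(float c_val : c_row)
    {
        float d0_val = 0.0f, d1_val = 0.0f;
        PassThrough{}(d0_val, c_val); // DxsInElementOps[0]
        UnarySquare{}(d1_val, c_val); // DxsInElementOps[1]
        d0_acc += d0_val;
        d1_acc += d1_val;
    }

    float mean = 0.0f, mean_square = 0.0f;
    UnaryDivide{N}(mean, d0_acc);        // DxsOutElementOps[0]
    UnaryDivide{N}(mean_square, d1_acc); // DxsOutElementOps[1]

    std::cout << mean << " " << mean_square << "\n"; // 2.5 7.5
    return 0;
}
```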
...@@ -39,16 +39,14 @@ using CLayout = ck::tensor_layout::gemm::RowMajor; ...@@ -39,16 +39,14 @@ using CLayout = ck::tensor_layout::gemm::RowMajor;
using AElementOp = ck::tensor_operation::element_wise::PassThrough; using AElementOp = ck::tensor_operation::element_wise::PassThrough;
using BElementOp = ck::tensor_operation::element_wise::PassThrough; using BElementOp = ck::tensor_operation::element_wise::PassThrough;
using CElementOp = ck::tensor_operation::element_wise::PassThrough; using CElementOp = ck::tensor_operation::element_wise::PassThrough;
using D0ReduceOp = ck::reduce::Add<ReduceAccDataType>; using D0ReduceOp = ck::reduce::Add;
using D1ReduceOp = ck::reduce::Add<ReduceAccDataType>; using D1ReduceOp = ck::reduce::Add;
using DxsReduceOp = ck::Tuple<D0ReduceOp, D1ReduceOp>; using DxsReduceOp = ck::Tuple<D0ReduceOp, D1ReduceOp>;
using UnaryIdenticElementOp = using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, false>; using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
using UnarySquareElementOp = using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
ck::tensor_operation::element_wise::UnarySquare<ReduceAccDataType, ReduceAccDataType, false>; using DxsOutElementOps = ck::Tuple<UnaryIdenticElementOp, UnaryIdenticElementOp>;
using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
using DxsOutElementOps = ck::Tuple<UnaryIdenticElementOp, UnaryIdenticElementOp>;
using DGlobalMemOp = using DGlobalMemOp =
ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd, ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd,
...@@ -259,14 +257,15 @@ int main(int argc, char* argv[]) ...@@ -259,14 +257,15 @@ int main(int argc, char* argv[])
{ {
for(int m = 0; m < M; ++m) for(int m = 0; m < M; ++m)
{ {
float d0_acc = d0_reduce_op.GetIdentityValue(); auto d0_acc = d0_reduce_op.GetIdentityValue<ReduceAccDataType>();
float d1_acc = d1_reduce_op.GetIdentityValue(); auto d1_acc = d1_reduce_op.GetIdentityValue<ReduceAccDataType>();
for(int n = 0; n < N; ++n) for(int n = 0; n < N; ++n)
{ {
float c_val = ck::type_convert<float>(c_g_m_n_host_result(batch, m, n)); auto c_val =
float d0_val = 0; ck::type_convert<ReduceAccDataType>(c_g_m_n_host_result(batch, m, n));
float d1_val = 0; ReduceAccDataType d0_val;
ReduceAccDataType d1_val;
UnaryIdenticElementOp{}(d0_val, c_val); UnaryIdenticElementOp{}(d0_val, c_val);
UnarySquareElementOp{}(d1_val, c_val); UnarySquareElementOp{}(d1_val, c_val);
......
...@@ -42,8 +42,7 @@ using ABDataType = F16; ...@@ -42,8 +42,7 @@ using ABDataType = F16;
using CDataType = F16; using CDataType = F16;
using EltwiseComputeDataType = F32; using EltwiseComputeDataType = F32;
using Add = ck::tensor_operation::binary_element_wise:: using Add = ck::tensor_operation::element_wise::Add;
Add<EltwiseComputeDataType, EltwiseComputeDataType, EltwiseComputeDataType>;
using DeviceElementwiseAddInstance = using DeviceElementwiseAddInstance =
ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType, ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType,
......
...@@ -17,8 +17,7 @@ using ABDataType = F16; ...@@ -17,8 +17,7 @@ using ABDataType = F16;
using CDataType = F16; using CDataType = F16;
using EltwiseComputeDataType = F32; using EltwiseComputeDataType = F32;
using Add = ck::tensor_operation::binary_element_wise:: using Add = ck::tensor_operation::element_wise::Add;
Add<EltwiseComputeDataType, EltwiseComputeDataType, EltwiseComputeDataType>;
using DeviceElementwiseAddInstance = using DeviceElementwiseAddInstance =
ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType, ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType,
......
...@@ -42,8 +42,7 @@ using ABDataType = F16; ...@@ -42,8 +42,7 @@ using ABDataType = F16;
using CDataType = F16; using CDataType = F16;
using EltwiseComputeDataType = F32; using EltwiseComputeDataType = F32;
using Add = ck::tensor_operation::binary_element_wise:: using Add = ck::tensor_operation::element_wise::Add;
Add<EltwiseComputeDataType, EltwiseComputeDataType, EltwiseComputeDataType>;
using DeviceElementwiseAddInstance = using DeviceElementwiseAddInstance =
ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType, ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType,
......
...@@ -42,8 +42,7 @@ using ABDataType = F16; ...@@ -42,8 +42,7 @@ using ABDataType = F16;
using CDataType = F16; using CDataType = F16;
using EltwiseComputeDataType = F32; using EltwiseComputeDataType = F32;
using Add = ck::tensor_operation::binary_element_wise:: using Add = ck::tensor_operation::element_wise::Add;
Add<EltwiseComputeDataType, EltwiseComputeDataType, EltwiseComputeDataType>;
using DeviceElementwiseAddInstance = using DeviceElementwiseAddInstance =
ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType, ck::tensor_operation::device::DeviceBinaryElementwise<ABDataType,
......
...@@ -48,17 +48,14 @@ using AElementOp = PassThrough; ...@@ -48,17 +48,14 @@ using AElementOp = PassThrough;
using BElementOp = PassThrough; using BElementOp = PassThrough;
using CElementOp = ck::tensor_operation::element_wise::Relu; using CElementOp = ck::tensor_operation::element_wise::Relu;
using C1ElementOp = PassThrough; using C1ElementOp = PassThrough;
using ReduceSumOp = ck::reduce::Add<ReduceAccDataType>; using ReduceSumOp = ck::reduce::Add;
using DxsReduceOp = ck::Tuple<ReduceSumOp, ReduceSumOp>; using DxsReduceOp = ck::Tuple<ReduceSumOp, ReduceSumOp>;
using UnaryIdenticElementOp = using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, false>; using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryDivide;
using UnaryDivElementOp = using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, true>; using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
using UnarySquareElementOp = using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
ck::tensor_operation::element_wise::UnarySquare<ReduceAccDataType, ReduceAccDataType, false>;
using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
using DxsGlobalMemOp = using DxsGlobalMemOp =
ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd, ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd,
...@@ -181,8 +178,8 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n, ...@@ -181,8 +178,8 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n,
auto reduceSumOpInst = ReduceSumOp{}; auto reduceSumOpInst = ReduceSumOp{};
for(int m = 0; m < M; ++m) for(int m = 0; m < M; ++m)
{ {
AccDataType mean_acc = reduceSumOpInst.GetIdentityValue(); auto mean_acc = reduceSumOpInst.GetIdentityValue<AccDataType>();
AccDataType square_mean_acc = reduceSumOpInst.GetIdentityValue(); auto square_mean_acc = reduceSumOpInst.GetIdentityValue<AccDataType>();
for(int n = 0; n < N; ++n) for(int n = 0; n < N; ++n)
{ {
...@@ -207,7 +204,12 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n, ...@@ -207,7 +204,12 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n,
for(int n = 0; n < N; ++n) for(int n = 0; n < N; ++n)
{ {
AccDataType out_acc = 0; AccDataType out_acc = 0;
layerNormInst(out_acc, c_m_n(m, n), mean_m(m), meanSquare_m(m), gamma_n(n), beta_n(n)); layerNormInst(out_acc,
static_cast<AccDataType>(c_m_n(m, n)),
static_cast<AccDataType>(mean_m(m)),
static_cast<AccDataType>(meanSquare_m(m)),
static_cast<AccDataType>(gamma_n(n)),
static_cast<AccDataType>(beta_n(n)));
out_m_n(m, n) = static_cast<DDataType>(out_acc); out_m_n(m, n) = static_cast<DDataType>(out_acc);
} }
} }
......
...@@ -44,17 +44,14 @@ using CLayout = ck::tensor_layout::gemm::RowMajor; ...@@ -44,17 +44,14 @@ using CLayout = ck::tensor_layout::gemm::RowMajor;
using AElementOp = ck::tensor_operation::element_wise::PassThrough; using AElementOp = ck::tensor_operation::element_wise::PassThrough;
using BElementOp = ck::tensor_operation::element_wise::PassThrough; using BElementOp = ck::tensor_operation::element_wise::PassThrough;
using CElementOp = ck::tensor_operation::element_wise::PassThrough; using CElementOp = ck::tensor_operation::element_wise::PassThrough;
using ReduceSumOp = ck::reduce::Add<ReduceAccDataType>; using ReduceSumOp = ck::reduce::Add;
using DxsReduceOp = ck::Tuple<ReduceSumOp, ReduceSumOp>; using DxsReduceOp = ck::Tuple<ReduceSumOp, ReduceSumOp>;
using UnaryIdenticElementOp = using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, false>; using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryDivide;
using UnaryDivElementOp = using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
ck::tensor_operation::element_wise::UnaryIdentic<ReduceAccDataType, ReduceAccDataType, true>; using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
using UnarySquareElementOp = using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
ck::tensor_operation::element_wise::UnarySquare<ReduceAccDataType, ReduceAccDataType, false>;
using DxsInElementOps = ck::Tuple<UnaryIdenticElementOp, UnarySquareElementOp>;
using DxsOutElementOps = ck::Tuple<UnaryDivElementOp, UnaryDivElementOp>;
using DxsGlobalMemOp = using DxsGlobalMemOp =
ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd, ck::InMemoryDataOperationEnumSequence<ck::InMemoryDataOperationEnum::AtomicAdd,
...@@ -156,13 +153,14 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n, ...@@ -156,13 +153,14 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n,
auto reduceSumOpInst = ReduceSumOp{}; auto reduceSumOpInst = ReduceSumOp{};
for(int m = 0; m < M; ++m) for(int m = 0; m < M; ++m)
{ {
float mean_acc = reduceSumOpInst.GetIdentityValue(); auto mean_acc = reduceSumOpInst.GetIdentityValue<ReduceAccDataType>();
float square_mean_acc = reduceSumOpInst.GetIdentityValue(); auto square_mean_acc = reduceSumOpInst.GetIdentityValue<ReduceAccDataType>();
for(int n = 0; n < N; ++n) for(int n = 0; n < N; ++n)
{ {
ReduceAccDataType c_val = ck::type_convert<ReduceAccDataType>(c_m_n(m, n)); auto c_val = ck::type_convert<ReduceAccDataType>(c_m_n(m, n));
ReduceAccDataType square_c_val = 0; auto square_c_val = reduceSumOpInst.GetIdentityValue<ReduceAccDataType>();
UnarySquareElementOp{}(square_c_val, c_val); UnarySquareElementOp{}(square_c_val, c_val);
reduceSumOpInst(mean_acc, c_val); reduceSumOpInst(mean_acc, c_val);
...@@ -182,7 +180,12 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n, ...@@ -182,7 +180,12 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n,
for(int n = 0; n < N; ++n) for(int n = 0; n < N; ++n)
{ {
float out_f32 = 0; float out_f32 = 0;
layerNormInst(out_f32, c_m_n(m, n), mean_m(m), meanSquare_m(m), gamma_n(n), beta_n(n)); layerNormInst(out_f32,
static_cast<float>(c_m_n(m, n)),
static_cast<float>(mean_m(m)),
static_cast<float>(meanSquare_m(m)),
static_cast<float>(gamma_n(n)),
static_cast<float>(beta_n(n)));
out_m_n(m, n) = static_cast<out_type>(out_f32); out_m_n(m, n) = static_cast<out_type>(out_f32);
} }
} }
......
...@@ -44,7 +44,7 @@ struct BaseOperator ...@@ -44,7 +44,7 @@ struct BaseOperator
virtual size_t GetWorkSpaceSize(const BaseArgument*) const { return 0; } virtual size_t GetWorkSpaceSize(const BaseArgument*) const { return 0; }
virtual void SetWorkSpacePointer(BaseArgument* p_arg, void* p_workspace) const final virtual void SetWorkSpacePointer(BaseArgument* p_arg, void* p_workspace) const
{ {
assert(p_arg); assert(p_arg);
p_arg->p_workspace_ = p_workspace; p_arg->p_workspace_ = p_workspace;
......
...@@ -557,11 +557,9 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle ...@@ -557,11 +557,9 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
float ave_time = 0; float ave_time = 0;
using Add = using Add = ck::tensor_operation::element_wise::Add;
ck::tensor_operation::binary_element_wise::Add<CDataType, CDataType, CDataType>; using Subtract = ck::tensor_operation::element_wise::Subtract;
using Substract = ck::tensor_operation::binary_element_wise:: using GridwiseBinAdd = GridwiseBinaryElementwise_1D<CDataType,
Substract<CDataType, CDataType, CDataType>;
using GridwiseBinAdd = GridwiseBinaryElementwise_1D<CDataType,
CDataType, CDataType,
CDataType, CDataType,
CDataType, CDataType,
...@@ -573,19 +571,19 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle ...@@ -573,19 +571,19 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
AScalarPerVector, AScalarPerVector,
BScalarPerVector, BScalarPerVector,
CScalarPerVector>; CScalarPerVector>;
using GridwiseBinSubstract = GridwiseBinaryElementwise_1D<CDataType, using GridwiseBinSubtract = GridwiseBinaryElementwise_1D<CDataType,
CDataType, CDataType,
CDataType, CDataType,
CDataType, CDataType,
CGridDesc_M, CGridDesc_M,
CGridDesc_M, CGridDesc_M,
CGridDesc_M, CGridDesc_M,
Substract, Subtract,
MPerThread, MPerThread,
AScalarPerVector, AScalarPerVector,
BScalarPerVector, BScalarPerVector,
CScalarPerVector>; CScalarPerVector>;
const auto add_kernel = kernel_binary_elementwise_1d<GridwiseBinAdd, const auto add_kernel = kernel_binary_elementwise_1d<GridwiseBinAdd,
CDataType, CDataType,
CDataType, CDataType,
CDataType, CDataType,
...@@ -593,14 +591,14 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle ...@@ -593,14 +591,14 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
CGridDesc_M, CGridDesc_M,
CGridDesc_M, CGridDesc_M,
Add>; Add>;
const auto substract_kernel = kernel_binary_elementwise_1d<GridwiseBinSubstract, const auto subtract_kernel = kernel_binary_elementwise_1d<GridwiseBinSubtract,
CDataType, CDataType,
CDataType, CDataType,
CDataType, CDataType,
CGridDesc_M, CGridDesc_M,
CGridDesc_M, CGridDesc_M,
CGridDesc_M, CGridDesc_M,
Substract>; Subtract>;
if(GridwiseGemm::CalculateHasMainKBlockLoop(K)) if(GridwiseGemm::CalculateHasMainKBlockLoop(K))
{ {
...@@ -653,7 +651,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle ...@@ -653,7 +651,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
// c_real = aux - aux_2 // c_real = aux - aux_2
ave_time += launch_and_time_kernel(stream_config, ave_time += launch_and_time_kernel(stream_config,
substract_kernel, subtract_kernel,
dim3(grid_size), dim3(grid_size),
dim3(BlockSize), dim3(BlockSize),
0, 0,
...@@ -663,7 +661,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle ...@@ -663,7 +661,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
arg.c_grid_desc_m_, arg.c_grid_desc_m_,
arg.c_grid_desc_m_, arg.c_grid_desc_m_,
arg.c_grid_desc_m_, arg.c_grid_desc_m_,
Substract{}); Subtract{});
ave_time += ave_time +=
launch_and_time_kernel(stream_config, launch_and_time_kernel(stream_config,
...@@ -764,7 +762,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle ...@@ -764,7 +762,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
// c_real = aux - aux_2 // c_real = aux - aux_2
ave_time += launch_and_time_kernel(stream_config, ave_time += launch_and_time_kernel(stream_config,
substract_kernel, subtract_kernel,
dim3(grid_size), dim3(grid_size),
dim3(BlockSize), dim3(BlockSize),
0, 0,
...@@ -774,7 +772,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle ...@@ -774,7 +772,7 @@ struct DeviceCGemm_4Gemm_Xdl_CShuffle
arg.c_grid_desc_m_, arg.c_grid_desc_m_,
arg.c_grid_desc_m_, arg.c_grid_desc_m_,
arg.c_grid_desc_m_, arg.c_grid_desc_m_,
Substract{}); Subtract{});
ave_time += ave_time +=
launch_and_time_kernel(stream_config, launch_and_time_kernel(stream_config,
......
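
A scalar cross-check of what the renamed Add / Subtract element-wise kernels assemble in the complex GEMM above: with aux = Re(A)·Re(B) and aux_2 = Im(A)·Im(B), as in the "// c_real = aux - aux_2" comment, the Subtract kernel yields the real part and the Add kernel the imaginary part. The aux_3 / aux_4 names below are hypothetical; only aux and aux_2 appear in the diff.

```cpp
#include <complex>
#include <iostream>

int main()
{
    const std::complex<float> a{2.0f, 3.0f}, b{4.0f, -1.0f};

    const float aux   = a.real() * b.real(); //  8
    const float aux_2 = a.imag() * b.imag(); // -3
    const float aux_3 = a.real() * b.imag(); // -2  (hypothetical name)
    const float aux_4 = a.imag() * b.real(); // 12  (hypothetical name)

    const float c_real = aux - aux_2;   // Subtract kernel: 11
    const float c_imag = aux_3 + aux_4; // Add kernel:      10

    std::cout << c_real << " " << c_imag << "\n";
    std::cout << (a * b) << "\n"; // (11,10), for cross-checking
    return 0;
}
```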
...@@ -35,14 +35,13 @@ struct DevicePool2dFwd_Input_N_Hi_Wi_C_Output_N_Ho_Wo_C : public DevicePool2dFwd ...@@ -35,14 +35,13 @@ struct DevicePool2dFwd_Input_N_Hi_Wi_C_Output_N_Ho_Wo_C : public DevicePool2dFwd
using IndexDataType = int32_t; using IndexDataType = int32_t;
using ReduceOperation = typename reduce_binary_operator<AccDataType, ReduceOpId>::opType; using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;
using InElementwiseOperation = using InElementwiseOperation =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation; typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
using AccElementwiseOperation = using AccElementwiseOperation =
typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>:: typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;
AccElementwiseOperation;
static constexpr index_t InSrcOutDstVectorDim = static constexpr index_t InSrcOutDstVectorDim =
0; // for NHWC, the dim C is the vector Dim for both input and output in memory, which is 0; // for NHWC, the dim C is the vector Dim for both input and output in memory, which is
...@@ -178,13 +177,10 @@ struct DevicePool2dFwd_Input_N_Hi_Wi_C_Output_N_Ho_Wo_C : public DevicePool2dFwd ...@@ -178,13 +177,10 @@ struct DevicePool2dFwd_Input_N_Hi_Wi_C_Output_N_Ho_Wo_C : public DevicePool2dFwd
invariant_lowest_length_ = C; invariant_lowest_length_ = C;
reduce_lowest_length_ = window_spatial_lengths[1]; reduce_lowest_length_ = window_spatial_lengths[1];
// TODO: is this correct? int32_t reduceLength = window_spatial_lengths[0] * window_spatial_lengths[1];
if constexpr(ReduceOpId == ck::ReduceTensorOp::AVG)
{ std::tie(in_element_op_, acc_element_op_) =
ck::index_t divider = window_spatial_lengths[0] * window_spatial_lengths[1]; reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(reduceLength);
in_element_op_ = InElementwiseOperation{divider};
acc_element_op_ = AccElementwiseOperation{divider};
}
} }
const InDataType* p_in_dev_; const InDataType* p_in_dev_;
......
...@@ -61,12 +61,9 @@ struct DeviceReduceMultiBlock : public DeviceReduce<InElementwiseOperation, AccE ...@@ -61,12 +61,9 @@ struct DeviceReduceMultiBlock : public DeviceReduce<InElementwiseOperation, AccE
static constexpr bool use_multiblock = static constexpr bool use_multiblock =
(OutMemoryDataOperation == InMemoryDataOperationEnum::AtomicAdd); (OutMemoryDataOperation == InMemoryDataOperationEnum::AtomicAdd);
static constexpr bool out_type_compatible_with_atomic_op = static_assert(ck::reduce::InMemoryDataOperatonSupportedOnDataType<OutMemoryDataOperation,
std::is_same<OutDataType, float>::value || std::is_same<OutDataType, double>::value; OutDataType>::value,
"The OutDataType must support the specified OutMemoryDataOperation!");
static_assert(
!use_multiblock || (use_multiblock && out_type_compatible_with_atomic_op),
"The OutDataType must support the atomic operation for using MultiBlock reduction");
static_assert(!use_multiblock || (use_multiblock && !OutputIndex), static_assert(!use_multiblock || (use_multiblock && !OutputIndex),
"MultiBlock reduction can only be used when outputing index is not required"); "MultiBlock reduction can only be used when outputing index is not required");
...@@ -349,7 +346,7 @@ struct DeviceReduceMultiBlock : public DeviceReduce<InElementwiseOperation, AccE ...@@ -349,7 +346,7 @@ struct DeviceReduceMultiBlock : public DeviceReduce<InElementwiseOperation, AccE
if constexpr(use_multiblock) if constexpr(use_multiblock)
{ {
const auto identityVal = const auto identityVal =
ck::reduce::GetIdentityValueueForInMemoryDataOperation<OutDataType>( ck::reduce::GetIdentityValueForInMemoryDataOperation<OutDataType>(
OutMemoryDataOperation); OutMemoryDataOperation);
const auto kernel_pre = const auto kernel_pre =
...@@ -492,7 +489,7 @@ struct DeviceReduceMultiBlock : public DeviceReduce<InElementwiseOperation, AccE ...@@ -492,7 +489,7 @@ struct DeviceReduceMultiBlock : public DeviceReduce<InElementwiseOperation, AccE
auto str = std::stringstream(); auto str = std::stringstream();
// clang-format off // clang-format off
str << "DeviceReduceMultiBlockAtomicAdd<" << BlockSize << ","; str << (OutMemoryDataOperation == InMemoryDataOperationEnum::Set? "DeviceReduceBlockWise<" : "DeviceReduceMultiBlock<") << BlockSize << ",";
str << "M_C" << MThreadClusterSize << "_S" << MThreadSliceSize << ","; str << "M_C" << MThreadClusterSize << "_S" << MThreadSliceSize << ",";
str << "K_C" << KThreadClusterSize << "_S" << KThreadSliceSize << ","; str << "K_C" << KThreadClusterSize << "_S" << KThreadSliceSize << ",";
str << "InSrcVectorDim_" << InSrcVectorDim << "_InSrcVectorSize_" << InSrcVectorSize << "_OutDstVectorSize_" << OutDstVectorSize << ">"; str << "InSrcVectorDim_" << InSrcVectorDim << "_InSrcVectorSize_" << InSrcVectorSize << "_OutDstVectorSize_" << OutDstVectorSize << ">";
......
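
A host-only sketch of the kind of trait the new static_assert relies on. The library's trait is spelled InMemoryDataOperatonSupportedOnDataType and its definition is not shown in this diff, so the shape below, the corrected spelling, and the accepted operation/data-type combinations are assumptions for illustration only.

```cpp
#include <type_traits>

enum class InMemoryDataOperationEnum { Set, AtomicAdd, AtomicMax, Add };

// Assumed trait shape: reports whether an InMemoryDataOperation can be used
// with a given data type, so device ops can static_assert on the combination.
template <InMemoryDataOperationEnum Op, typename T>
struct InMemoryDataOperationSupportedOnDataType : std::false_type {};

// Set works for any type; atomics are limited to types with hardware support.
template <typename T>
struct InMemoryDataOperationSupportedOnDataType<InMemoryDataOperationEnum::Set, T>
    : std::true_type {};

template <>
struct InMemoryDataOperationSupportedOnDataType<InMemoryDataOperationEnum::AtomicAdd, float>
    : std::true_type {};
template <>
struct InMemoryDataOperationSupportedOnDataType<InMemoryDataOperationEnum::AtomicAdd, double>
    : std::true_type {};

// Usage mirroring the DeviceReduceMultiBlock static_assert above.
template <InMemoryDataOperationEnum OutMemOp, typename OutDataType>
struct DeviceReduceSketch
{
    static_assert(InMemoryDataOperationSupportedOnDataType<OutMemOp, OutDataType>::value,
                  "The OutDataType must support the specified OutMemoryDataOperation!");
};

int main()
{
    DeviceReduceSketch<InMemoryDataOperationEnum::AtomicAdd, float> ok{};
    // DeviceReduceSketch<InMemoryDataOperationEnum::AtomicAdd, int> bad{}; // would not compile
    (void)ok;
    return 0;
}
```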
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include "reduction_operator.hpp" #include "reduction_operator.hpp"
#include "reduction_enums.hpp" #include "reduction_enums.hpp"
#include "element_wise_operation.hpp" #include "element_wise_operation.hpp"
#include <tuple>
namespace ck { namespace ck {
...@@ -37,77 +38,69 @@ namespace ck { ...@@ -37,77 +38,69 @@ namespace ck {
// The boolean member "indexable" are also provided in reduce_binary_operactor for // The boolean member "indexable" are also provided in reduce_binary_operactor for
// easier checking by the upper-layer codes in the kernels. // easier checking by the upper-layer codes in the kernels.
template <typename T, ReduceTensorOp Op> template <ReduceTensorOp Op>
struct reduce_binary_operator; struct reduce_binary_operator;
template <typename T> template <>
struct reduce_binary_operator<T, ReduceTensorOp::ADD> struct reduce_binary_operator<ReduceTensorOp::ADD>
{ {
using opType = reduce::Add<T>; using opType = reduce::Add;
using dataType = T;
static constexpr bool indexable = false; static constexpr bool indexable = false;
}; };
template <typename T> template <>
struct reduce_binary_operator<T, ReduceTensorOp::MUL> struct reduce_binary_operator<ReduceTensorOp::MUL>
{ {
using opType = reduce::Mul<T>; using opType = reduce::Mul;
using dataType = T;
static constexpr bool indexable = false; static constexpr bool indexable = false;
}; };
template <typename T> template <>
struct reduce_binary_operator<T, ReduceTensorOp::MIN> struct reduce_binary_operator<ReduceTensorOp::MIN>
{ {
using opType = reduce::Min<T>; using opType = reduce::Min;
using dataType = T;
static constexpr bool indexable = true; static constexpr bool indexable = true;
}; };
template <typename T> template <>
struct reduce_binary_operator<T, ReduceTensorOp::MAX> struct reduce_binary_operator<ReduceTensorOp::MAX>
{ {
using opType = reduce::Max<T>; using opType = reduce::Max;
using dataType = T;
static constexpr bool indexable = true; static constexpr bool indexable = true;
}; };
template <typename T> template <>
struct reduce_binary_operator<T, ReduceTensorOp::AMAX> struct reduce_binary_operator<ReduceTensorOp::AMAX>
{ {
using opType = reduce::AMax<T>; using opType = reduce::AMax;
using dataType = T;
static constexpr bool indexable = true; static constexpr bool indexable = true;
}; };
template <typename T> template <>
struct reduce_binary_operator<T, ReduceTensorOp::AVG> struct reduce_binary_operator<ReduceTensorOp::AVG>
{ {
using opType = reduce::Add<T>; using opType = reduce::Add;
using dataType = T;
static constexpr bool indexable = false; static constexpr bool indexable = false;
}; };
template <typename T> template <>
struct reduce_binary_operator<T, ReduceTensorOp::NORM1> struct reduce_binary_operator<ReduceTensorOp::NORM1>
{ {
using opType = reduce::Add<T>; using opType = reduce::Add;
using dataType = T;
static constexpr bool indexable = false; static constexpr bool indexable = false;
}; };
template <typename T> template <>
struct reduce_binary_operator<T, ReduceTensorOp::NORM2> struct reduce_binary_operator<ReduceTensorOp::NORM2>
{ {
using opType = reduce::Add<T>; using opType = reduce::Add;
using dataType = T;
static constexpr bool indexable = false; static constexpr bool indexable = false;
}; };
...@@ -115,53 +108,101 @@ struct reduce_binary_operator<T, ReduceTensorOp::NORM2> ...@@ -115,53 +108,101 @@ struct reduce_binary_operator<T, ReduceTensorOp::NORM2>
// The templated struct reduce_unary_operator maps the enum Ids of Reduce operators to two unary // The templated struct reduce_unary_operator maps the enum Ids of Reduce operators to two unary
// functor classes. // functor classes.
// The two unary functors are called before and afer the Reduction is executed respectively // The two unary functors are called before and afer the Reduction is executed respectively
template <typename T, ReduceTensorOp Op, bool IsFirstReduce, bool IsLastReduce> template <ReduceTensorOp Op, bool IsFirstReduce, bool IsLastReduce>
struct reduce_unary_operator struct reduce_unary_operator
{ {
using InElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>; using InElementwiseOperation = tensor_operation::element_wise::PassThrough;
using AccElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>; using AccElementwiseOperation = tensor_operation::element_wise::PassThrough;
static std::tuple<InElementwiseOperation, AccElementwiseOperation>
GetElementwiseOperator(int32_t reduceLength)
{
(void)reduceLength;
return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
};
}; };
template <typename T, bool IsFirstReduce> template <bool IsFirstReduce>
struct reduce_unary_operator<T, ReduceTensorOp::AVG, IsFirstReduce, true> struct reduce_unary_operator<ReduceTensorOp::AVG, IsFirstReduce, true>
{ {
using InElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>; using InElementwiseOperation = tensor_operation::element_wise::PassThrough;
using AccElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T, true>; using AccElementwiseOperation = tensor_operation::element_wise::UnaryDivide;
static std::tuple<InElementwiseOperation, AccElementwiseOperation>
GetElementwiseOperator(int32_t reduceLength)
{
return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{reduceLength});
};
}; };
template <typename T, bool IsLastReduce> template <bool IsLastReduce>
struct reduce_unary_operator<T, ReduceTensorOp::NORM1, true, IsLastReduce> struct reduce_unary_operator<ReduceTensorOp::NORM1, true, IsLastReduce>
{ {
using InElementwiseOperation = tensor_operation::element_wise::UnaryAbs<T, T>; using InElementwiseOperation = tensor_operation::element_wise::UnaryAbs;
using AccElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>; using AccElementwiseOperation = tensor_operation::element_wise::PassThrough;
static std::tuple<InElementwiseOperation, AccElementwiseOperation>
GetElementwiseOperator(int32_t reduceLength)
{
(void)reduceLength;
return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
};
}; };
template <typename T, bool IsLastReduce> template <bool IsLastReduce>
struct reduce_unary_operator<T, ReduceTensorOp::AMAX, true, IsLastReduce> struct reduce_unary_operator<ReduceTensorOp::AMAX, true, IsLastReduce>
{ {
using InElementwiseOperation = tensor_operation::element_wise::UnaryAbs<T, T>; using InElementwiseOperation = tensor_operation::element_wise::UnaryAbs;
using AccElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>; using AccElementwiseOperation = tensor_operation::element_wise::PassThrough;
static std::tuple<InElementwiseOperation, AccElementwiseOperation>
GetElementwiseOperator(int32_t reduceLength)
{
(void)reduceLength;
return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
};
}; };
template <typename T> template <>
struct reduce_unary_operator<T, ReduceTensorOp::NORM2, true, false> struct reduce_unary_operator<ReduceTensorOp::NORM2, true, false>
{ {
using InElementwiseOperation = tensor_operation::element_wise::UnarySquare<T, T>; using InElementwiseOperation = tensor_operation::element_wise::UnarySquare;
using AccElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>; using AccElementwiseOperation = tensor_operation::element_wise::PassThrough;
static std::tuple<InElementwiseOperation, AccElementwiseOperation>
GetElementwiseOperator(int32_t reduceLength)
{
(void)reduceLength;
return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
};
}; };
template <typename T> template <>
struct reduce_unary_operator<T, ReduceTensorOp::NORM2, true, true> struct reduce_unary_operator<ReduceTensorOp::NORM2, true, true>
{ {
using InElementwiseOperation = tensor_operation::element_wise::UnarySquare<T, T>; using InElementwiseOperation = tensor_operation::element_wise::UnarySquare;
using AccElementwiseOperation = tensor_operation::element_wise::UnarySqrt<T, T>; using AccElementwiseOperation = tensor_operation::element_wise::UnarySqrt;
static std::tuple<InElementwiseOperation, AccElementwiseOperation>
GetElementwiseOperator(int32_t reduceLength)
{
(void)reduceLength;
return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
};
}; };
template <typename T> template <>
struct reduce_unary_operator<T, ReduceTensorOp::NORM2, false, true> struct reduce_unary_operator<ReduceTensorOp::NORM2, false, true>
{ {
using InElementwiseOperation = tensor_operation::element_wise::UnaryIdentic<T, T>; using InElementwiseOperation = tensor_operation::element_wise::PassThrough;
using AccElementwiseOperation = tensor_operation::element_wise::UnarySqrt<T, T>; using AccElementwiseOperation = tensor_operation::element_wise::UnarySqrt;
static std::tuple<InElementwiseOperation, AccElementwiseOperation>
GetElementwiseOperator(int32_t reduceLength)
{
(void)reduceLength;
return std::make_tuple(InElementwiseOperation{}, AccElementwiseOperation{});
};
}; };
} // end of namespace ck } // end of namespace ck
......
...@@ -28,100 +28,189 @@ ...@@ -28,100 +28,189 @@
namespace ck { namespace ck {
namespace tensor_operation { namespace tensor_operation {
namespace binary_element_wise {
template <typename Y, typename X1, typename X2> namespace element_wise {
struct Add;
template <> struct Add
struct Add<double, double, double>
{ {
template <typename T>
__host__ __device__ constexpr void operator()(T& y, const T& x0, const T& x1) const;
template <>
__host__ __device__ constexpr void __host__ __device__ constexpr void
operator()(double& dst, const double& src1, const double& src2) const operator()<float>(float& y, const float& x0, const float& x1) const
{ {
dst = src1 + src2; y = x0 + x1;
} };
};
template <> template <>
struct Add<float, float, float>
{
__host__ __device__ constexpr void __host__ __device__ constexpr void
operator()(float& dst, const float& src1, const float& src2) const operator()<double>(double& y, const double& x0, const double& x1) const
{ {
dst = src1 + src2; y = x0 + x1;
} };
};
template <> // Question: should half_t be supported ?
struct Add<half_t, half_t, half_t> template <>
{ __host__ __device__ constexpr void
operator()<half_t>(half_t& y, const half_t& x0, const half_t& x1) const
{
y = x0 + x1;
};
// Question: should bhalf_t be supported ?
template <>
__host__ __device__ constexpr void __host__ __device__ constexpr void
operator()(half_t& dst, const half_t& src1, const half_t& src2) const operator()<bhalf_t>(bhalf_t& y, const bhalf_t& x0, const bhalf_t& x1) const
{ {
dst = src1 + src2; const float x1_tmp = ck::type_convert<float>(x0);
const float x2_tmp = ck::type_convert<float>(x1);
const float y_tmp = x1_tmp + x2_tmp;
y = ck::type_convert<bhalf_t>(y_tmp);
} }
}; };
template <> struct Subtract
struct Add<bhalf_t, bhalf_t, bhalf_t>
{ {
template <typename T>
__host__ __device__ constexpr void operator()(T& y, const T& x0, const T& x1) const;
template <>
__host__ __device__ constexpr void __host__ __device__ constexpr void
operator()(bhalf_t& dst, const bhalf_t& src1, const bhalf_t& src2) const operator()<float>(float& y, const float& x0, const float& x1) const
{ {
const float x1 = ck::type_convert<float>(src1); y = x0 - x1;
const float x2 = ck::type_convert<float>(src2); };
const float y = x1 + x2;
dst = ck::type_convert<bhalf_t>(y);
}
};
template <typename Y, typename X1, typename X2> template <>
struct Substract; __host__ __device__ constexpr void
operator()<double>(double& y, const double& x0, const double& x1) const
{
y = x0 - x1;
};
template <> // Question: should half_t be supported ?
struct Substract<double, double, double> template <>
{
__host__ __device__ constexpr void __host__ __device__ constexpr void
operator()(double& dst, const double& src1, const double& src2) const operator()<half_t>(half_t& y, const half_t& x0, const half_t& x1) const
{ {
dst = src1 - src2; y = x0 - x1;
};
// Question: should bhalf_t be supported ?
template <>
__host__ __device__ constexpr void
operator()<bhalf_t>(bhalf_t& y, const bhalf_t& x0, const bhalf_t& x1) const
{
const float x1_tmp = ck::type_convert<float>(x0);
const float x2_tmp = ck::type_convert<float>(x1);
const float y_tmp = x1_tmp - x2_tmp;
y = ck::type_convert<bhalf_t>(y_tmp);
} }
}; };
template <> struct AlphaBetaAdd
struct Substract<float, float, float>
{ {
AlphaBetaAdd(float alpha, float beta) : alpha_(alpha), beta_(beta){};
template <typename T>
__host__ __device__ constexpr void operator()(T& y, const T& x0, const T& x1) const;
template <>
__host__ __device__ constexpr void __host__ __device__ constexpr void
operator()(float& dst, const float& src1, const float& src2) const operator()<float>(float& y, const float& x0, const float& x1) const
{ {
dst = src1 - src2; y = alpha_ * x0 + beta_ * x1;
} };
template <>
__host__ __device__ constexpr void
operator()<double>(double& y, const double& x0, const double& x1) const
{
y = static_cast<double>(alpha_) * x0 + static_cast<double>(beta_) * x1;
};
// Question: should half_t be supported ?
template <>
__host__ __device__ constexpr void
operator()<half_t>(half_t& y, const half_t& x0, const half_t& x1) const
{
y = static_cast<half_t>(alpha_ * static_cast<float>(x0) + beta_ * static_cast<float>(x1));
};
float alpha_;
float beta_;
}; };
template <> struct AddRelu
struct Substract<half_t, half_t, half_t>
{ {
template <typename T>
__host__ __device__ constexpr void operator()(T& y, const T& x0, const T& x1) const;
template <>
__host__ __device__ constexpr void __host__ __device__ constexpr void
operator()(half_t& dst, const half_t& src1, const half_t& src2) const operator()<float>(float& y, const float& x0, const float& x1) const
{ {
dst = src1 - src2; const float a = x0 + x1;
} y = a > 0.0f ? a : 0.0f;
};
template <>
__host__ __device__ constexpr void
operator()<double>(double& y, const double& x0, const double& x1) const
{
const double a = x0 + x1;
y = a > 0.0 ? a : 0.0;
};
// Question: should half_t be supported ?
template <>
__host__ __device__ constexpr void
operator()<half_t>(half_t& y, const half_t& x0, const half_t& x1) const
{
const half_t a = x0 + x1;
y = a > static_cast<half_t>(0.0f) ? a : static_cast<half_t>(0.0f);
};
}; };
template <> struct AddHardswish
struct Substract<bhalf_t, bhalf_t, bhalf_t>
{ {
template <typename T>
__host__ __device__ constexpr void operator()(T& y, const T& x0, const T& x1) const;
template <>
__host__ __device__ constexpr void __host__ __device__ constexpr void
operator()(bhalf_t& dst, const bhalf_t& src1, const bhalf_t& src2) const operator()<float>(float& y, const float& x0, const float& x1) const
{ {
const float x1 = ck::type_convert<float>(src1); float a = x0 + x1;
const float x2 = ck::type_convert<float>(src2); float b = a + float{3};
const float y = x1 - x2; float c = (b > 0) * (b > 6.0f ? 6.0f : b) * a * 0.166667f;
dst = ck::type_convert<bhalf_t>(y); y = c;
} };
template <>
__host__ __device__ constexpr void
operator()<double>(double& y, const double& x0, const double& x1) const
{
double a = x0 + x1;
double b = a + 3.0;
double c = (b > 0) * (b > 6.0 ? 6.0 : b) * a * 0.166667;
y = c;
};
// Question: should half_t be supported ?
template <>
__host__ __device__ constexpr void
operator()<half_t>(half_t& y, const half_t& x0, const half_t& x1) const
{
float a = x0 + x1;
float b = a + 3.0f;
float c = (b > 0) * (b > 6.0f ? 6.0f : b) * a * 0.166667f;
y = c;
};
}; };
} // namespace binary_element_wise } // namespace element_wise
} // namespace tensor_operation } // namespace tensor_operation
} // namespace ck } // namespace ck
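
The new element_wise::Add / Subtract above put a single operator() template on one struct and specialize it per data type in class scope. A portable host-only sketch of the same idea, using a static_assert-guarded template body instead of in-class specializations so it builds with any C++17 compiler; half_t / bhalf_t handling is omitted.

```cpp
#include <iostream>
#include <type_traits>

struct Add
{
    template <typename T>
    constexpr void operator()(T& y, const T& x0, const T& x1) const
    {
        static_assert(std::is_same<T, float>::value || std::is_same<T, double>::value,
                      "Data type is not supported by this operation!");
        y = x0 + x1;
    }
};

struct Subtract
{
    template <typename T>
    constexpr void operator()(T& y, const T& x0, const T& x1) const
    {
        static_assert(std::is_same<T, float>::value || std::is_same<T, double>::value,
                      "Data type is not supported by this operation!");
        y = x0 - x1;
    }
};

int main()
{
    float  yf = 0.0f; Add{}(yf, 1.0f, 2.0f);    // 3
    double yd = 0.0;  Subtract{}(yd, 5.0, 1.5); // 3.5
    std::cout << yf << " " << yd << "\n";
    return 0;
}
```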
#pragma once #pragma once
#include "data_type.hpp" #include "data_type.hpp"
#include "math_v2.hpp" #include "math_v2.hpp"
#include "unary_element_wise_operation.hpp"
#include "binary_element_wise_operation.hpp"
namespace ck { namespace ck {
namespace tensor_operation { namespace tensor_operation {
namespace element_wise { namespace element_wise {
struct PassThrough
{
__host__ __device__ void operator()(float& y, const float& x) const { y = x; }
__host__ __device__ void operator()(half_t& y, const half_t& x) const { y = x; }
__host__ __device__ void operator()(bhalf_t& y, const bhalf_t& x) const { y = x; }
__host__ __device__ void operator()(int32_t& y, const int32_t& x) const { y = x; }
__host__ __device__ void operator()(int8_t& y, const int8_t& x) const { y = x; }
__host__ __device__ void operator()(double& y, const double& x) const { y = x; }
};
struct Add
{
__host__ __device__ constexpr void operator()(float& y, const float& x0, const float& x1) const
{
y = x0 + x1;
}
__host__ __device__ constexpr void
operator()(half_t& y, const half_t& x0, const half_t& x1) const
{
// FIXME - Use float (acc type) bias in the future.
y = x0 + x1;
}
};
struct AlphaBetaAdd
{
AlphaBetaAdd(float alpha, float beta) : alpha_(alpha), beta_(beta) {}
__host__ __device__ constexpr void operator()(float& y, const float& x0, const float& x1) const
{
y = alpha_ * x0 + beta_ * x1;
}
__host__ __device__ constexpr void
operator()(half_t& y, const half_t& x0, const half_t& x1) const
{
// FIXME - Let x0 be acc type
y = static_cast<half_t>(alpha_ * static_cast<float>(x0) + beta_ * static_cast<float>(x1));
}
float alpha_;
float beta_;
};
struct AddRelu
{
__host__ __device__ constexpr void operator()(float& y, const float& x0, const float& x1) const
{
const float a = x0 + x1;
y = a > 0 ? a : 0;
}
__host__ __device__ constexpr void
operator()(half_t& y, const half_t& x0, const half_t& x1) const
{
const half_t a = x0 + x1;
y = a > 0 ? a : 0;
}
};
struct AddHardswish
{
__host__ __device__ constexpr void operator()(float& y, const float& x0, const float& x1) const
{
float a = x0 + x1;
float b = a + float{3};
float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
y = c;
}
__host__ __device__ constexpr void
operator()(half_t& y, const half_t& x0, const half_t& x1) const
{
float a = x0 + x1;
float b = a + float{3};
float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
y = c;
}
};
struct AddReluAdd struct AddReluAdd
{ {
__host__ __device__ constexpr void __host__ __device__ constexpr void
...@@ -167,204 +83,41 @@ struct Relu ...@@ -167,204 +83,41 @@ struct Relu
struct Normalize struct Normalize
{ {
Normalize(float epsilon = 1e-4) : epsilon_(epsilon) {} Normalize(double epsilon = 1e-4) : epsilon_(epsilon) {}
__host__ __device__ constexpr void operator()(float& y,
const float& x,
const float& mean,
const float& mean_square,
const float& gamma,
const float& beta) const
{
float variance = mean_square - (mean * mean);
y = ((x - mean) / sqrtf(variance + epsilon_)) * gamma + beta;
}
float epsilon_; template <typename T>
}; __host__ __device__ constexpr void operator()(
T& y, const T& x, const T& mean, const T& mean_square, const T& gamma, const T& beta) const;
// Unary operators are usually called element-wisely before/after the reduction is executed on the
// elements. They are needed for easy implementation of reduction types of AVG, NRM1, NRM2
template <typename Y, typename X, bool HasDividing = false>
struct UnaryIdentic;
template <>
struct UnaryIdentic<float, float, false>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(float& y, const float& x) const { y = x; };
};
template <>
struct UnaryIdentic<float, float, true>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { divider_ = divider; };
__host__ __device__ void operator()(float& y, const float& x) const template <>
__host__ __device__ constexpr void operator()<float>(float& y,
const float& x,
const float& mean,
const float& mean_square,
const float& gamma,
const float& beta) const
{ {
y = x / type_convert<float>(divider_); using ck::math::sqrt;
};
int32_t divider_ = 1;
};
template <>
struct UnaryIdentic<half_t, half_t, false>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(half_t& y, const half_t& x) const { y = x; };
};
template <> float variance = mean_square - (mean * mean);
struct UnaryIdentic<double, double, false> y = ((x - mean) / sqrt(variance + static_cast<float>(epsilon_))) * gamma + beta;
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(double& y, const double& x) const { y = x; };
};
template <>
struct UnaryIdentic<double, double, true>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { divider_ = divider; };
__host__ __device__ void operator()(double& y, const double& x) const
{
y = x / type_convert<double>(divider_);
}; };
int32_t divider_ = 1; template <>
}; __host__ __device__ constexpr void operator()<double>(double& y,
const double& x,
template <> const double& mean,
struct UnaryIdentic<int32_t, int32_t, false> const double& mean_square,
{ const double& gamma,
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { (void)divider; }; const double& beta) const
__host__ __device__ void operator()(int32_t& y, const int32_t& x) const { y = x; };
};
template <>
struct UnaryIdentic<int32_t, int32_t, true>
{
__host__ __device__ UnaryIdentic(const int32_t divider = 1) { divider_ = divider; };
__host__ __device__ void operator()(int32_t& y, const int32_t& x) const { y = x / divider_; };
int32_t divider_ = 1;
};
template <>
struct UnaryIdentic<int8_t, int8_t, false>
{
__host__ __device__ UnaryIdentic(const int8_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(int8_t& y, const int8_t& x) const { y = x; };
};
template <typename Y, typename X, bool HasDividing = false>
struct UnarySquare;
template <>
struct UnarySquare<float, float, false>
{
__host__ __device__ UnarySquare(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(float& y, const float& x) const { y = x * x; };
};
template <>
struct UnarySquare<float, float, true>
{
__host__ __device__ UnarySquare(const int32_t divider = 1) { divider_ = divider; };
__host__ __device__ void operator()(float& y, const float& x) const
{ {
y = x * x / type_convert<float>(divider_); using ck::math::sqrt;
};
int32_t divider_ = 1;
};
template <>
struct UnarySquare<double, double, false>
{
__host__ __device__ UnarySquare(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(double& y, const double& x) const { y = x * x; };
};
template <>
struct UnarySquare<double, double, true>
{
__host__ __device__ UnarySquare(const int32_t divider = 1) { divider_ = divider; };
__host__ __device__ void operator()(double& y, const double& x) const double variance = mean_square - (mean * mean);
{ y = ((x - mean) / sqrt(variance + epsilon_)) * gamma + beta;
y = x * x / type_convert<double>(divider_);
}; };
int32_t divider_ = 1; double epsilon_;
};
template <typename Y, typename X>
struct UnaryAbs;
template <>
struct UnaryAbs<float, float>
{
__host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(float& y, const float& x) const { y = ck::math::abs(x); };
};
template <>
struct UnaryAbs<half_t, half_t>
{
__host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(half_t& y, const half_t& x) const { y = ck::math::abs(x); };
};
template <>
struct UnaryAbs<double, double>
{
__host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(double& y, const double& x) const { y = ck::math::abs(x); };
};
template <>
struct UnaryAbs<int8_t, int8_t>
{
__host__ __device__ UnaryAbs(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(int8_t& y, const int8_t& x) const { y = ck::math::abs(x); };
};
template <typename Y, typename X>
struct UnarySqrt;
template <>
struct UnarySqrt<float, float>
{
__host__ __device__ UnarySqrt(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(float& y, const float& x) const { y = ck::math::sqrt(x); };
};
template <>
struct UnarySqrt<double, double>
{
__host__ __device__ UnarySqrt(const int32_t divider = 1) { (void)divider; };
__host__ __device__ void operator()(double& y, const double& x) const
{
y = ck::math::sqrt(x);
};
}; };
template <typename Y, typename X> template <typename Y, typename X>
......
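
A quick host check of the Normalize formula above (float path), assuming mean and mean_square are the outputs of an Add reduction divided by N:

```cpp
#include <cmath>
#include <iostream>
#include <vector>

int main()
{
    const std::vector<float> x{1.0f, 2.0f, 3.0f, 4.0f};
    const float gamma = 1.0f, beta = 0.0f, epsilon = 1e-4f;

    float mean = 0.0f, mean_square = 0.0f;
    for(float v : x) { mean += v; mean_square += v * v; }
    mean        /= x.size();
    mean_square /= x.size();

    const float variance = mean_square - mean * mean;
    for(float v : x)
    {
        const float y = ((v - mean) / std::sqrt(variance + epsilon)) * gamma + beta;
        std::cout << y << " ";
    }
    std::cout << "\n"; // roughly -1.34 -0.45 0.45 1.34
    return 0;
}
```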
#pragma once
#include "data_type.hpp"
#include "math_v2.hpp"
namespace ck {
namespace tensor_operation {
namespace element_wise {
struct PassThrough
{
template <typename T>
__host__ __device__ void operator()(T& y, const T& x) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value ||
is_same<T, half_t>::value || is_same<T, bhalf_t>::value ||
is_same<T, int32_t>::value || is_same<T, int8_t>::value,
"Data type is not supported by this operation!");
y = x;
};
};
struct UnaryDivide
{
__host__ __device__ UnaryDivide(const int32_t divider = 1) : divider_(divider){};
template <typename T>
__host__ __device__ void operator()(T& y, const T& x) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value ||
is_same<T, int32_t>::value,
"Data type is not supported by this operation!");
y = x / type_convert<T>(divider_);
};
int32_t divider_ = 1;
};
struct UnarySquare
{
template <typename T>
__host__ __device__ void operator()(T& y, const T& x) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value,
"Data type is not supported by this operation!");
y = x * x;
};
};
struct UnaryAbs
{
template <typename T>
__host__ __device__ void operator()(T& y, const T& x) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value ||
is_same<T, half_t>::value || is_same<T, int32_t>::value ||
is_same<T, int8_t>::value,
"Data type is not supported by this operation!");
y = ck::math::abs(x);
};
};
struct UnarySqrt
{
template <typename T>
__host__ __device__ void operator()(T& y, const T& x) const
{
static_assert(is_same<T, float>::value || is_same<T, double>::value,
"Data type is not supported by this operation!");
y = ck::math::sqrt(x);
};
};
} // namespace element_wise
} // namespace tensor_operation
} // namespace ck
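
One consequence of keeping the template on operator(), as in the unary functors above, is that a single functor object serves every supported precision. A short host-only check with a local stand-in UnaryAbs (using std::abs in place of ck::math::abs):

```cpp
#include <cmath>
#include <iostream>

struct UnaryAbs
{
    template <typename T>
    void operator()(T& y, const T& x) const { y = std::abs(x); }
};

int main()
{
    UnaryAbs abs_op; // one instance, no data-type template argument needed

    float  yf = 0.0f;
    double yd = 0.0;
    abs_op(yf, -1.5f);
    abs_op(yd, -2.25);

    std::cout << yf << " " << yd << "\n"; // 1.5 2.25
    return 0;
}
```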