Unverified commit 3e8cc094, authored by rocking5566, committed by GitHub

Merge branch 'develop' into gemm_layernorm_welford

parents 24af0144 7038723a
@@ -6,6 +6,7 @@
 #include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp"
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"

@@ -131,7 +132,7 @@ bool run_grouped_conv_fwd(bool do_verification,
     std::array<ck::index_t, NDimSpatial> input_left_pads{};
     std::array<ck::index_t, NDimSpatial> input_right_pads{};

-    auto copy = [](auto& x, auto& y) { std::copy(x.begin(), x.end(), y.begin()); };
+    auto copy = [](auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };

     copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
     copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
...
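This file and several below switch raw `std::copy(x.begin(), x.end(), y.begin())` calls to `ck::ranges::copy(x, y.begin())`, pulled in through the new `ck/library/utility/algorithm.hpp` include. That header's contents are not part of this diff; a minimal sketch consistent with the call sites, in the same delegating style as the `accumulate_n` helper added further down, might look like:

// Hypothetical reconstruction of ck::ranges::copy; the real definition
// lives in ck/library/utility/algorithm.hpp, which this diff does not show.
#include <algorithm>
#include <iterator>

namespace ck {
namespace ranges {

template <typename Range, typename OutputIterator>
auto copy(Range&& range, OutputIterator iter)
    -> decltype(std::copy(std::begin(range), std::end(range), iter))
{
    return std::copy(std::begin(range), std::end(range), iter);
}

} // namespace ranges
} // namespace ck

Taking the whole range instead of an iterator pair removes the repeated `.begin()`/`.end()` boilerplate and the chance of mismatching them.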
@@ -5,6 +5,7 @@
 #include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
 #include "ck/tensor_operation/gpu/device/impl/device_elementwise.hpp"

+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"

@@ -69,7 +70,7 @@ int main()
                               static_cast<int>(nhwc[2] * nhwc[3]),
                               static_cast<int>(nhwc[3])};

-    std::copy(nchw.begin(), nchw.end(), ab_lengths.begin());
+    ck::ranges::copy(nchw, ab_lengths.begin());

     auto broadcastPermute = DeviceElementwisePermuteInstance{};
     auto argument = broadcastPermute.MakeArgumentPointer(
...
@@ -116,6 +116,10 @@ __global__ void
     ignore = batch_count;
     ignore = block_2_ctile_map;
     ignore = compute_ptr_offset_of_batch;
+
+    compute_ptr_offset_of_batch.GetAPtrOffset(0);
+    compute_ptr_offset_of_batch.GetBPtrOffset(0);
+    compute_ptr_offset_of_batch.GetCPtrOffset(0);
 #endif // end of if (defined(__gfx908__) || defined(__gfx90a__))
 }
...
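The three `Get*PtrOffset(0)` calls are added to the stub branch compiled when the kernel targets neither gfx908 nor gfx90a. Like the existing `ignore =` assignments, they appear to exist only to reference otherwise-unused parameters and members so the compiler stays quiet where the real kernel body is compiled out; that reading is an inference, since the surrounding code is not shown. The idiom itself, with a hypothetical sink type standing in for CK's actual `ignore`:

// Sketch of the unused-parameter idiom: assigning to a no-op sink counts
// as a use, silencing -Wunused-parameter on compiled-out code paths.
struct IgnoreSink
{
    template <typename T>
    constexpr const IgnoreSink& operator=(T&&) const
    {
        return *this;
    }
};
inline constexpr IgnoreSink ignore{};

void stub_kernel_body(int batch_count)
{
    ignore = batch_count; // intentionally unused in this build
}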
@@ -22,6 +22,7 @@
 #include "ck/host_utility/device_prop.hpp"
 #include "ck/host_utility/kernel_launch.hpp"
 #include "ck/host_utility/io.hpp"
+#include "ck/library/utility/numeric.hpp"

 namespace ck {
 namespace tensor_operation {

@@ -410,10 +411,9 @@ struct DeviceGroupedConvFwdMultipleDMultipleR_Xdl_CShuffle
     {
         const index_t N = r_g_n_wos_lengths[1];

-        const index_t NHoWo = N * std::accumulate(r_g_n_wos_lengths.begin() + 2,
-                                                  r_g_n_wos_lengths.begin() + 2 + NDimSpatial,
-                                                  index_t{1},
-                                                  std::multiplies<index_t>());
+        const index_t NHoWo =
+            N * ck::accumulate_n<index_t>(
+                    r_g_n_wos_lengths.begin() + 2, NDimSpatial, 1, std::multiplies<>());

         const auto r_grid_desc_mraw = make_naive_tensor_descriptor_packed(make_tuple(NHoWo));

@@ -435,10 +435,9 @@ struct DeviceGroupedConvFwdMultipleDMultipleR_Xdl_CShuffle
         const index_t WoStride = r_g_n_wos_strides[NDimSpatial + 2];

-        const index_t NHoWo = N * std::accumulate(r_g_n_wos_lengths.begin() + 2,
-                                                  r_g_n_wos_lengths.begin() + 2 + NDimSpatial,
-                                                  index_t{1},
-                                                  std::multiplies<index_t>());
+        const index_t NHoWo =
+            N * ck::accumulate_n<index_t>(
+                    r_g_n_wos_lengths.begin() + 2, NDimSpatial, 1, std::multiplies<>());

         const auto r_grid_desc_mraw =
             make_naive_tensor_descriptor(make_tuple(NHoWo), make_tuple(WoStride));
...
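Each of these hunks collapses the same four-line `std::accumulate` call into `ck::accumulate_n` (the helper added in the new `numeric.hpp` shown further down): take the product of the `NDimSpatial` spatial lengths sitting at a fixed offset into the length array. A worked 2-D example with made-up sizes:

#include <array>
#include <functional>

#include "ck/library/utility/numeric.hpp" // assumed path of the new helper

// Hypothetical lengths laid out as {G, N, Ho, Wo} = {1, 4, 28, 28}; the
// NDimSpatial = 2 spatial lengths start at begin() + 2.
int example_nhowo()
{
    std::array<int, 4> r_g_n_wos_lengths{1, 4, 28, 28};
    const int N = r_g_n_wos_lengths[1];
    // N * (Ho * Wo) = 4 * 28 * 28 = 3136: the flattened GEMM M dimension
    return N * ck::accumulate_n<int>(
                   r_g_n_wos_lengths.begin() + 2, 2, 1, std::multiplies<>());
}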
@@ -4,6 +4,7 @@

 #pragma once

+#include "ck/library/utility/numeric.hpp"
 #include "ck/utility/common_header.hpp"
 #include "ck/tensor_description/tensor_descriptor.hpp"
 #include "ck/tensor_description/tensor_descriptor_helper.hpp"

@@ -47,10 +48,9 @@ struct TransformConvFwdToGemm
         if constexpr(ConvForwardSpecialization ==
                      device::ConvolutionForwardSpecialization::Filter1x1Stride1Pad0)
         {
-            const index_t NWo = N * std::accumulate(c_g_n_k_wos_lengths.begin() + 3,
-                                                    c_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
-                                                    index_t{1},
-                                                    std::multiplies<index_t>());
+            const index_t NWo =
+                N * ck::accumulate_n<index_t>(
+                        c_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

             const auto in_gemmm_gemmk_desc =
                 make_naive_tensor_descriptor_packed(make_tuple(NWo, C));

@@ -146,10 +146,9 @@ struct TransformConvFwdToGemm
         if constexpr(ConvForwardSpecialization ==
                      device::ConvolutionForwardSpecialization::Filter1x1Stride1Pad0)
         {
-            const index_t NHoWo = N * std::accumulate(c_g_n_k_wos_lengths.begin() + 3,
-                                                      c_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
-                                                      index_t{1},
-                                                      std::multiplies<index_t>());
+            const index_t NHoWo =
+                N * ck::accumulate_n<index_t>(
+                        c_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

             const auto in_gemmm_gemmk_desc =
                 make_naive_tensor_descriptor_packed(make_tuple(NHoWo, C));

@@ -262,10 +261,8 @@ struct TransformConvFwdToGemm
                      device::ConvolutionForwardSpecialization::Filter1x1Stride1Pad0)
         {
             const index_t NDoHoWo =
-                N * std::accumulate(c_g_n_k_wos_lengths.begin() + 3,
-                                    c_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
-                                    index_t{1},
-                                    std::multiplies<index_t>());
+                N * ck::accumulate_n<index_t>(
+                        c_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

             const auto in_gemmm_gemmk_desc =
                 make_naive_tensor_descriptor_packed(make_tuple(NDoHoWo, C));

@@ -390,10 +387,9 @@ struct TransformConvFwdToGemm
         if constexpr(ConvForwardSpecialization ==
                      device::ConvolutionForwardSpecialization::Filter1x1Stride1Pad0)
         {
-            const index_t NHoWo = N * std::accumulate(c_g_n_k_wos_lengths.begin() + 3,
-                                                      c_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
-                                                      index_t{1},
-                                                      std::multiplies<index_t>());
+            const index_t NHoWo =
+                N * ck::accumulate_n<index_t>(
+                        c_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

             // This is different
             const index_t WiStride = a_g_n_c_wis_strides[2 + NDimSpatial];

@@ -506,10 +502,9 @@ struct TransformConvFwdToGemm
         if constexpr(ConvForwardSpecialization ==
                      device::ConvolutionForwardSpecialization::Filter1x1Stride1Pad0)
         {
-            const index_t NHoWo = N * std::accumulate(c_g_n_k_wos_lengths.begin() + 3,
-                                                      c_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
-                                                      index_t{1},
-                                                      std::multiplies<index_t>());
+            const index_t NHoWo =
+                N * ck::accumulate_n<index_t>(
+                        c_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

             // This is different
             const index_t WiStride = a_g_n_c_wis_strides[2 + NDimSpatial];

@@ -639,10 +634,8 @@ struct TransformConvFwdToGemm
                      device::ConvolutionForwardSpecialization::Filter1x1Stride1Pad0)
         {
             const index_t NDoHoWo =
-                N * std::accumulate(c_g_n_k_wos_lengths.begin() + 3,
-                                    c_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
-                                    index_t{1},
-                                    std::multiplies<index_t>());
+                N * ck::accumulate_n<index_t>(
+                        c_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

             // This is different
             const index_t WiStride = a_g_n_c_wis_strides[2 + NDimSpatial];

@@ -768,10 +761,8 @@ struct TransformConvFwdToGemm
         const index_t K = b_g_k_c_xs_lengths[1];
         const index_t C = b_g_k_c_xs_lengths[2];

-        const index_t YX = std::accumulate(b_g_k_c_xs_lengths.begin() + 3,
-                                           b_g_k_c_xs_lengths.begin() + 3 + NDimSpatial,
-                                           index_t{1},
-                                           std::multiplies<index_t>());
+        const index_t YX = ck::accumulate_n<index_t>(
+            b_g_k_c_xs_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

         const auto wei_gemmn_gemmk_desc =
             make_naive_tensor_descriptor_packed(make_tuple(K, YX * C));

@@ -794,10 +785,8 @@ struct TransformConvFwdToGemm
         const index_t K = b_g_k_c_xs_lengths[1];
         const index_t C = b_g_k_c_xs_lengths[2];

-        const index_t YX = std::accumulate(b_g_k_c_xs_lengths.begin() + 3,
-                                           b_g_k_c_xs_lengths.begin() + 3 + NDimSpatial,
-                                           index_t{1},
-                                           std::multiplies<index_t>());
+        const index_t YX = ck::accumulate_n<index_t>(
+            b_g_k_c_xs_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

         const index_t KStride = b_g_k_c_xs_strides[1];
         const index_t XStride = b_g_k_c_xs_strides[2 + NDimSpatial];

@@ -827,10 +816,9 @@ struct TransformConvFwdToGemm
         const index_t N = c_g_n_k_wos_lengths[1];
         const index_t K = c_g_n_k_wos_lengths[2];

-        const index_t NHoWo = N * std::accumulate(c_g_n_k_wos_lengths.begin() + 3,
-                                                  c_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
-                                                  index_t{1},
-                                                  std::multiplies<index_t>());
+        const index_t NHoWo =
+            N * ck::accumulate_n<index_t>(
+                    c_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

         const auto out_gemmm_gemmn_desc = make_naive_tensor_descriptor_packed(make_tuple(NHoWo, K));

@@ -855,10 +843,9 @@ struct TransformConvFwdToGemm
         const auto KStride = I1;
         const index_t WoStride = c_g_n_k_wos_strides[NDimSpatial + 2];

-        const index_t NHoWo = N * std::accumulate(c_g_n_k_wos_lengths.begin() + 3,
-                                                  c_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
-                                                  index_t{1},
-                                                  std::multiplies<index_t>());
+        const index_t NHoWo =
+            N * ck::accumulate_n<index_t>(
+                    c_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

         const auto out_gemmm_gemmn_desc =
             make_naive_tensor_descriptor(make_tuple(NHoWo, K), make_tuple(WoStride, KStride));

@@ -878,10 +865,9 @@ struct TransformConvFwdToGemm
         const index_t N = c_g_n_k_wos_lengths[1];
         const index_t K = c_g_n_k_wos_lengths[2];

-        const index_t NHoWo = N * std::accumulate(c_g_n_k_wos_lengths.begin() + 3,
-                                                  c_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
-                                                  index_t{1},
-                                                  std::multiplies<index_t>());
+        const index_t NHoWo =
+            N * ck::accumulate_n<index_t>(
+                    c_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>());

         const auto out_gemmm_gemmn_desc =
             make_naive_tensor_descriptor(make_tuple(NHoWo, K), make_tuple(I0, I1));
...
@@ -10,6 +10,8 @@
 #include "ck/ck.hpp"
+#include "ck/library/utility/numeric.hpp"
+
 namespace ck {
 namespace utils {
 namespace conv {

@@ -55,10 +57,8 @@ struct ConvParam
         // sizeof(InDataType) * (G * N * C * <input spatial lengths product>) +
         return sizeof(InDataType) *
                (G_ * N_ * C_ *
-                std::accumulate(std::begin(input_spatial_lengths_),
-                                std::begin(input_spatial_lengths_) + num_dim_spatial_,
-                                static_cast<std::size_t>(1),
-                                std::multiplies<std::size_t>()));
+                ck::accumulate_n<std::size_t>(
+                    std::begin(input_spatial_lengths_), num_dim_spatial_, 1, std::multiplies<>()));
     }

     template <typename WeiDataType>

@@ -67,10 +67,8 @@ struct ConvParam
         // sizeof(WeiDataType) * (G * K * C * <filter spatial lengths product>) +
         return sizeof(WeiDataType) *
                (G_ * K_ * C_ *
-                std::accumulate(std::begin(filter_spatial_lengths_),
-                                std::begin(filter_spatial_lengths_) + num_dim_spatial_,
-                                static_cast<std::size_t>(1),
-                                std::multiplies<std::size_t>()));
+                ck::accumulate_n<std::size_t>(
+                    std::begin(filter_spatial_lengths_), num_dim_spatial_, 1, std::multiplies<>()));
     }

     template <typename OutDataType>
...
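For scale, with made-up sizes G_ = 1, N_ = 4, C_ = 32, a 2-D 30x30 input, and a 4-byte InDataType such as float, GetInputByte() evaluates to 4 * (1 * 4 * 32 * 30 * 30) = 460,800 bytes; GetWeightByte() follows the same shape with K_ and the filter spatial lengths.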
New file (path inferred from the #include "ck/library/utility/numeric.hpp" lines above):

+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
+
+#pragma once
+
+#include <iterator>
+#include <numeric>
+
+namespace ck {
+
+template <typename T, typename ForwardIterator, typename Size, typename BinaryOperation>
+auto accumulate_n(ForwardIterator first, Size count, T init, BinaryOperation op)
+    -> decltype(std::accumulate(first, std::next(first, count), init, op))
+{
+    return std::accumulate(first, std::next(first, count), init, op);
+}
+
+} // namespace ck
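The helper above is a thin count-based wrapper over `std::accumulate`, so callers pass a start iterator and an element count instead of computing the end iterator by hand. A quick sanity check of the intended equivalence (values arbitrary):

#include <cassert>
#include <functional>
#include <numeric>
#include <vector>

#include "ck/library/utility/numeric.hpp" // assumed path of the header above

int main()
{
    std::vector<int> lengths{3, 5, 7, 2};

    // product of the first three elements, seeded with 1
    const int a = ck::accumulate_n<int>(lengths.begin(), 3, 1, std::multiplies<>());
    const int b = std::accumulate(lengths.begin(), lengths.begin() + 3, 1,
                                  std::multiplies<>());

    assert(a == b && a == 105); // 3 * 5 * 7
    return 0;
}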
@@ -72,14 +72,10 @@ std::size_t ConvParam::GetFlops() const
 {
     // 2 * G * N * K * C * <output spatial lengths product> * <filter spatial lengths product>
     return static_cast<std::size_t>(2) * G_ * N_ * K_ * C_ *
-           std::accumulate(std::begin(output_spatial_lengths_),
-                           std::begin(output_spatial_lengths_) + num_dim_spatial_,
-                           static_cast<std::size_t>(1),
-                           std::multiplies<std::size_t>()) *
-           std::accumulate(std::begin(filter_spatial_lengths_),
-                           std::begin(filter_spatial_lengths_) + num_dim_spatial_,
-                           static_cast<std::size_t>(1),
-                           std::multiplies<std::size_t>());
+           ck::accumulate_n<std::size_t>(
+               std::begin(output_spatial_lengths_), num_dim_spatial_, 1, std::multiplies<>()) *
+           ck::accumulate_n<std::size_t>(
+               std::begin(filter_spatial_lengths_), num_dim_spatial_, 1, std::multiplies<>());
 }

 std::string get_conv_param_parser_helper_msg()
...
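For intuition, with made-up sizes G_ = 1, N_ = 4, K_ = 64, C_ = 32, a 28x28 output, and a 3x3 filter, this evaluates to 2 * 1 * 4 * 64 * 32 * (28 * 28) * (3 * 3) = 115,605,504 FLOPs: one multiply and one add per filter tap per output element.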
@@ -6,8 +6,9 @@
 #include "ck/utility/reduction_enums.hpp"

 #include "ck/tensor_operation/gpu/device/device_reduce.hpp"
-#include "ck/library/utility/check_err.hpp"
 #include "ck/library/tensor_operation_instance/gpu/reduce/device_reduce_instance.hpp"
+#include "ck/library/utility/algorithm.hpp"
+#include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_reduction.hpp"
 #include "ck/library/utility/host_common_util.hpp"

@@ -359,10 +360,10 @@ bool profile_reduce_impl_impl(bool do_verification,
     std::array<index_t, NumOutDim> arrOutLengths;
     std::array<index_t, NumOutDim> arrOutStrides;

-    std::copy(inLengths.begin(), inLengths.end(), arrInLengths.begin());
-    std::copy(inStrides.begin(), inStrides.end(), arrInStrides.begin());
-    std::copy(outLengths.begin(), outLengths.end(), arrOutLengths.begin());
-    std::copy(outStrides.begin(), outStrides.end(), arrOutStrides.begin());
+    ck::ranges::copy(inLengths, arrInLengths.begin());
+    ck::ranges::copy(inStrides, arrInStrides.begin());
+    ck::ranges::copy(outLengths, arrOutLengths.begin());
+    ck::ranges::copy(outStrides, arrOutStrides.begin());

     for(auto& reduce_ptr : reduce_ptrs)
     {

@@ -491,7 +492,7 @@ bool profile_reduce_impl(bool do_verification,
     std::array<ck::index_t, descType::NumReduceDim_> arrReduceDims;

-    std::copy(reduceDims.begin(), reduceDims.end(), arrReduceDims.begin());
+    ck::ranges::copy(reduceDims, arrReduceDims.begin());

     pass = pass && profile_reduce_impl_impl<InDataType,
                                             AccDataType,
...