// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <array>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <thread>
#include <tuple>

#include "ck/ck.hpp"
#include "ck/utility/reduction_enums.hpp"
#include "ck/utility/reduction_functions_accumulate.hpp"
#include "ck/tensor_operation/gpu/device/reduction_operator_mapping.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_pool3d_fwd_ndhwc_ndhwc.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"

// Host-side reference implementation: performs the same windowed reduction as
// the device kernel over the (n, c, d, h, w)-indexed host tensor, optionally
// tracking the index of the selected element within each pooling window.
template <typename InDataType,
          typename OutDataType,
          typename AccDataType,
          typename IndexDataType,
          ck::ReduceTensorOp ReduceOpId,
          bool PropagateNan,
          bool OutputIndex>
static void pool3d_host_verify(const Tensor<InDataType>& in,
                               Tensor<OutDataType>& out,
                               Tensor<IndexDataType>& out_indices,
                               const std::array<ck::index_t, 3>& window_spatial_lengths,
                               const std::array<ck::index_t, 3>& window_strides,
                               const std::array<ck::index_t, 3>& in_left_pads,
                               const std::array<ck::index_t, 3>& /*in_right_pads*/)
{
    const int32_t reduceLength =
        window_spatial_lengths[0] * window_spatial_lengths[1] * window_spatial_lengths[2];

    using ReduceOperation = typename ck::reduce_binary_operator<ReduceOpId>::opType;

    auto elementwise_ops =
        ck::reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(reduceLength);

    auto in_elementwise_op  = std::get<0>(elementwise_ops);
    auto acc_elementwise_op = std::get<1>(elementwise_ops);

    if constexpr(!OutputIndex)
    {
        using Accumulation =
            ck::detail::AccumulateWithNanCheck<PropagateNan, ReduceOperation, AccDataType>;

        auto f_ncdhw = [&](auto n, auto c, auto do_, auto ho, auto wo) {
            auto accuVal = ReduceOperation::template GetIdentityValue<AccDataType>();

            for(ck::index_t z = 0; z < window_spatial_lengths[0]; ++z)
            {
                ck::index_t di = do_ * window_strides[0] + z - in_left_pads[0];
                for(ck::index_t y = 0; y < window_spatial_lengths[1]; ++y)
                {
                    ck::index_t hi = ho * window_strides[1] + y - in_left_pads[1];
                    for(ck::index_t x = 0; x < window_spatial_lengths[2]; ++x)
                    {
                        ck::index_t wi = wo * window_strides[2] + x - in_left_pads[2];

                        // skip padding positions that fall outside the input
                        if(di >= 0 && di < static_cast<ck::index_t>(in.mDesc.GetLengths()[2]) &&
                           hi >= 0 && hi < static_cast<ck::index_t>(in.mDesc.GetLengths()[3]) &&
                           wi >= 0 && wi < static_cast<ck::index_t>(in.mDesc.GetLengths()[4]))
                        {
                            AccDataType currVal = static_cast<AccDataType>(in(n, c, di, hi, wi));

                            in_elementwise_op(currVal, currVal);

                            Accumulation::Calculate(accuVal, currVal);
                        }
                    }
                }
            }

            acc_elementwise_op(accuVal, accuVal);

            out(n, c, do_, ho, wo) = accuVal;
        };

        make_ParallelTensorFunctor(f_ncdhw,
                                   out.mDesc.GetLengths()[0],
                                   out.mDesc.GetLengths()[1],
                                   out.mDesc.GetLengths()[2],
                                   out.mDesc.GetLengths()[3],
                                   out.mDesc.GetLengths()[4])(std::thread::hardware_concurrency());
    }
    else
    {
        using Accumulation = ck::detail::AccumulateWithIndexAndNanCheck<PropagateNan,
                                                                        ReduceOperation,
                                                                        AccDataType,
                                                                        IndexDataType>;

        auto f_ncdhw = [&](auto n, auto c, auto do_, auto ho, auto wo) {
            auto accuVal            = ReduceOperation::template GetIdentityValue<AccDataType>();
            IndexDataType accuIndex = 0;

            for(ck::index_t z = 0; z < window_spatial_lengths[0]; ++z)
            {
                ck::index_t di = do_ * window_strides[0] + z - in_left_pads[0];
                for(ck::index_t y = 0; y < window_spatial_lengths[1]; ++y)
                {
                    ck::index_t hi = ho * window_strides[1] + y - in_left_pads[1];
                    for(ck::index_t x = 0; x < window_spatial_lengths[2]; ++x)
                    {
                        ck::index_t wi = wo * window_strides[2] + x - in_left_pads[2];
                        if(di >= 0 && di < static_cast<ck::index_t>(in.mDesc.GetLengths()[2]) &&
                           hi >= 0 && hi < static_cast<ck::index_t>(in.mDesc.GetLengths()[3]) &&
                           wi >= 0 && wi < static_cast<ck::index_t>(in.mDesc.GetLengths()[4]))
                        {
                            AccDataType currVal = static_cast<AccDataType>(in(n, c, di, hi, wi));

                            // window-local flattened position of the candidate element
                            IndexDataType currIndex =
                                z * window_spatial_lengths[1] * window_spatial_lengths[2] +
                                y * window_spatial_lengths[2] + x;

                            in_elementwise_op(currVal, currVal);

                            Accumulation::Calculate(accuVal, currVal,
                                                    accuIndex, currIndex);
                        }
                    }
                }
            }

            acc_elementwise_op(accuVal, accuVal);

            out(n, c, do_, ho, wo)         = accuVal;
            out_indices(n, c, do_, ho, wo) = accuIndex;
        };

        make_ParallelTensorFunctor(f_ncdhw,
                                   out.mDesc.GetLengths()[0],
                                   out.mDesc.GetLengths()[1],
                                   out.mDesc.GetLengths()[2],
                                   out.mDesc.GetLengths()[3],
                                   out.mDesc.GetLengths()[4])(std::thread::hardware_concurrency());
    }
}
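// NOTE: when OutputIndex is true, out_indices stores the *window-local*
// flattened position z * Y * X + y * X + x of the selected element, not a
// global tensor offset. The helper below is an illustrative sketch added for
// exposition (the namespace and function are hypothetical and unused by the
// rest of this header); it shows how such an index decodes back to (z, y, x):
namespace pool3d_example_detail {

// Hypothetical helper: decode a window-local flattened index into (z, y, x)
// window offsets, given the window lengths Y and X.
inline std::array<ck::index_t, 3>
unflatten_window_index(ck::index_t idx, ck::index_t Y, ck::index_t X)
{
    const ck::index_t z = idx / (Y * X);
    const ck::index_t y = (idx % (Y * X)) / X;
    const ck::index_t x = idx % X;

    return {{z, y, x}};
}

} // namespace pool3d_example_detail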
// End-to-end test driver: builds the device pooling instance, runs it, and
// (optionally) checks the device result against pool3d_host_verify.
template <typename InDataType,
          typename OutDataType,
          typename AccDataType,
          typename IndexDataType,
          typename InLayout,
          typename OutLayout,
          ck::ReduceTensorOp ReduceOpId,
          bool PropagateNan,
          bool OutputIndex>
bool pool3d_test(bool do_verification,
                 bool time_kernel,
                 ck::index_t N,
                 ck::index_t C,
                 ck::index_t Z,
                 ck::index_t Y,
                 ck::index_t X,
                 ck::index_t Di,
                 ck::index_t Hi,
                 ck::index_t Wi,
                 ck::index_t window_stride_d,
                 ck::index_t window_stride_h,
                 ck::index_t window_stride_w,
                 ck::index_t in_left_pad_d,
                 ck::index_t in_left_pad_h,
                 ck::index_t in_left_pad_w,
                 ck::index_t in_right_pad_d,
                 ck::index_t in_right_pad_h,
                 ck::index_t in_right_pad_w)
{
    using DevicePoolFwdInstance =
        ck::tensor_operation::device::DevicePool3dFwd_Input_N_Di_Hi_Wi_C_Output_N_Do_Ho_Wo_C<
            InDataType,  // InDataType
            OutDataType, // OutDataType
            AccDataType, // AccDataType
            ReduceOpId,
            OutputIndex,
            64, // BlockSize
            64, // ReduceMThreadClusterSize
            1,  // ReduceKThreadClusterSize
            4,  // ReduceMThreadSliceSize
            1,  // ReduceKThreadSliceSize
            4>; // InSrcOutDstVectorSize

    // output spatial lengths
    const ck::index_t Do = (Di + in_left_pad_d + in_right_pad_d - Z) / window_stride_d + 1;
    const ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - Y) / window_stride_h + 1;
    const ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - X) / window_stride_w + 1;

    const std::array<ck::index_t, 3> window_spatial_lengths{{Z, Y, X}};
    const std::array<ck::index_t, 3> window_strides{
        {window_stride_d, window_stride_h, window_stride_w}};
    const std::array<ck::index_t, 3> input_left_pads{{in_left_pad_d, in_left_pad_h, in_left_pad_w}};
    const std::array<ck::index_t, 3> input_right_pads{
        {in_right_pad_d, in_right_pad_h, in_right_pad_w}};

    // tensor layout
    auto f_host_tensor_descriptor = [](std::size_t N_,
                                       std::size_t C_,
                                       std::size_t D,
                                       std::size_t H,
                                       std::size_t W,
                                       auto layout) {
        using namespace ck::literals;

        if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NCDHW>::value)
        {
            return HostTensorDescriptor({N_, C_, D, H, W},
                                        {C_ * D * H * W, D * H * W, H * W, W, 1_uz});
        }
        else if constexpr(ck::is_same<decltype(layout),
                                      ck::tensor_layout::convolution::NDHWC>::value)
        {
            return HostTensorDescriptor({N_, C_, D, H, W},
                                        {D * C_ * H * W, 1_uz, C_ * H * W, W * C_, C_});
        }
    };

    Tensor<InDataType> in_n_c_di_hi_wi(f_host_tensor_descriptor(N, C, Di, Hi, Wi, InLayout{}));
    Tensor<OutDataType> out_n_c_do_ho_wo_host(
        f_host_tensor_descriptor(N, C, Do, Ho, Wo, OutLayout{}));
    Tensor<IndexDataType> out_indices_n_c_do_ho_wo_host(
        f_host_tensor_descriptor(N, C, Do, Ho, Wo, OutLayout{}));
    Tensor<OutDataType> out_n_c_do_ho_wo_device(
        f_host_tensor_descriptor(N, C, Do, Ho, Wo, OutLayout{}));
    Tensor<IndexDataType> out_indices_n_c_do_ho_wo_device(
        f_host_tensor_descriptor(N, C, Do, Ho, Wo, OutLayout{}));

    std::cout << "in_n_c_di_hi_wi: " << in_n_c_di_hi_wi.mDesc << std::endl;
    std::cout << "out_n_c_do_ho_wo: " << out_n_c_do_ho_wo_host.mDesc << std::endl;

    in_n_c_di_hi_wi.GenerateTensorValue(GeneratorTensor_3<InDataType>{-1.0, 1.0});

    DeviceMem in_device_buf(sizeof(InDataType) * in_n_c_di_hi_wi.mDesc.GetElementSpaceSize());
    DeviceMem out_device_buf(sizeof(OutDataType) *
                             out_n_c_do_ho_wo_device.mDesc.GetElementSpaceSize());
    DeviceMem out_indices_device_buf(sizeof(IndexDataType) *
                                     out_indices_n_c_do_ho_wo_device.mDesc.GetElementSpaceSize());

    in_device_buf.ToDevice(in_n_c_di_hi_wi.mData.data());

    auto pool         = DevicePoolFwdInstance{};
    auto invoker_ptr  = pool.MakeInvokerPointer();
    auto argument_ptr = pool.MakeArgumentPointer(
        static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
        static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
        static_cast<IndexDataType*>(out_indices_device_buf.GetDeviceBuffer()),
        {N, C, Di, Hi, Wi},
        {Z, Y, X},
        {N, C, Do, Ho, Wo},
        window_strides,
        input_left_pads,
        input_right_pads,
        {2, 3, 4});

    if(!pool.IsSupportedArgument(argument_ptr.get()))
    {
        throw std::runtime_error("wrong! device_op with the specified compilation parameters does "
                                 "not support this problem");
    }

    float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

    std::cout << "Perf: " << ave_time << std::endl;

    bool pass = true;

    if(do_verification)
    {
        pool3d_host_verify<InDataType,
                           OutDataType,
                           AccDataType,
                           IndexDataType,
                           ReduceOpId,
                           PropagateNan,
                           OutputIndex>(in_n_c_di_hi_wi,
                                        out_n_c_do_ho_wo_host,
                                        out_indices_n_c_do_ho_wo_host,
                                        window_spatial_lengths,
                                        window_strides,
                                        input_left_pads,
                                        input_right_pads);

        out_device_buf.FromDevice(out_n_c_do_ho_wo_device.mData.data());

        pass = pass && ck::utils::check_err(out_n_c_do_ho_wo_device, out_n_c_do_ho_wo_host);

        if constexpr(OutputIndex)
        {
            out_indices_device_buf.FromDevice(out_indices_n_c_do_ho_wo_device.mData.data());

            pass = pass && ck::utils::check_err(out_indices_n_c_do_ho_wo_device,
                                                out_indices_n_c_do_ho_wo_host);
        }
    }

    return pass;
}
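// -----------------------------------------------------------------------------
// Usage sketch (illustrative only; the data types, problem sizes, and header
// file name below are assumptions, not part of this header). A driver
// translation unit could instantiate pool3d_test for an f16 NDHWC max-pool
// with index output like so; NDHWC is chosen to match the layout expected by
// the DevicePool3dFwd_Input_N_Di_Hi_Wi_C_Output_N_Do_Ho_Wo_C instance:
//
//   #include "pool3d_fwd_common.hpp" // assumed name of this header
//
//   int main()
//   {
//       using NDHWC = ck::tensor_layout::convolution::NDHWC;
//
//       const bool pass = pool3d_test<ck::half_t,              // InDataType
//                                     ck::half_t,              // OutDataType
//                                     float,                   // AccDataType
//                                     int32_t,                 // IndexDataType
//                                     NDHWC,                   // InLayout
//                                     NDHWC,                   // OutLayout
//                                     ck::ReduceTensorOp::MAX, // ReduceOpId
//                                     false,                   // PropagateNan
//                                     true>(                   // OutputIndex
//           true,        // do_verification
//           false,       // time_kernel
//           2, 32,       // N, C
//           2, 2, 2,     // Z, Y, X (window lengths)
//           30, 30, 30,  // Di, Hi, Wi
//           2, 2, 2,     // window strides (d, h, w)
//           1, 1, 1,     // left pads (d, h, w)
//           1, 1, 1);    // right pads (d, h, w)
//
//       return pass ? 0 : 1;
//   }
// -----------------------------------------------------------------------------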