#include #include #include #include #include #include #include "config.hpp" #include "print.hpp" #include "device.hpp" #include "host_tensor.hpp" #include "host_tensor_generator.hpp" #include "conv_common.hpp" #include "host_conv_bwd_data.hpp" #include "device_tensor.hpp" #include "device_dynamic_convolution_backward_data_implicit_gemm_v4r1_xdlops_nhwc_kyxc_nhwk.hpp" #include "device_dynamic_convolution_backward_data_implicit_gemm_v4r1r2_xdlops_nhwc_kyxc_nhwk.hpp" #define USE_DYNAMIC_MODE 1 #define USE_CONV_BWD_V4R1_XDL_NHWC 0 #define USE_CONV_BWD_V4R1R2_XDL_NHWC 1 enum ConvBackwardDataAlgo { V4R1XDLNHWC, V4R1R2XDLNHWC, }; int main(int argc, char* argv[]) { using namespace ck; constexpr auto I0 = Number<0>{}; constexpr auto I1 = Number<1>{}; constexpr auto I2 = Number<2>{}; constexpr auto I3 = Number<3>{}; constexpr auto I4 = Number<4>{}; constexpr auto I5 = Number<5>{}; constexpr auto I6 = Number<6>{}; #if USE_DYNAMIC_MODE // dynamic mode if(argc != 22) { printf("arg1 to 5: layout, algo, do_verification, init_method, do_log, nrepeat\n"); printf("rest: N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, RightPx\n"); exit(1); } const ConvTensorLayout layout = static_cast(atoi(argv[1])); const ConvBackwardDataAlgo algo = static_cast(atoi(argv[2])); const bool do_verification = atoi(argv[3]); const int init_method = atoi(argv[4]); const bool do_log = atoi(argv[5]); const int nrepeat = atoi(argv[6]); const index_t N = atoi(argv[7]); const index_t K = atoi(argv[8]); const index_t C = atoi(argv[9]); const index_t Y = atoi(argv[10]); const index_t X = atoi(argv[11]); const index_t Hi = atoi(argv[12]); const index_t Wi = atoi(argv[13]); const index_t conv_stride_h = atoi(argv[14]); const index_t conv_stride_w = atoi(argv[15]); const index_t conv_dilation_h = atoi(argv[16]); const index_t conv_dilation_w = atoi(argv[17]); const index_t in_left_pad_h = atoi(argv[18]); const index_t in_left_pad_w = atoi(argv[19]); const index_t in_right_pad_h = atoi(argv[20]); const index_t in_right_pad_w = atoi(argv[21]); const index_t YEff = (Y - 1) * conv_dilation_h + 1; const index_t XEff = (X - 1) * conv_dilation_w + 1; const index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - YEff) / conv_stride_h + 1; const index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - XEff) / conv_stride_w + 1; #else // static mode if(argc < 7) { printf("arg1 to 5: layout, algo, do_verification, init_method, do_log, nrepeat\n"); exit(1); } const ConvTensorLayout layout = static_cast(atoi(argv[1])); const ConvBackwardDataAlgo algo = static_cast(atoi(argv[2])); const bool do_verification = atoi(argv[3]); const int init_method = atoi(argv[4]); const bool do_log = atoi(argv[5]); const int nrepeat = atoi(argv[6]); constexpr index_t N = 128; constexpr index_t C = 192; constexpr index_t Hi = 71; constexpr index_t Wi = 71; constexpr index_t K = 256; constexpr index_t Y = 3; constexpr index_t X = 3; const index_t conv_stride_h = 2; const index_t conv_stride_w = 2; const index_t conv_dilation_h = 1; const index_t conv_dilation_w = 1; const index_t in_left_pad_h = 1; const index_t in_left_pad_w = 1; const index_t in_right_pad_h = 1; const index_t in_right_pad_w = 1; const index_t YEff = (Y - 1) * conv_dilation_h + 1; const index_t XEff = (X - 1) * conv_dilation_w + 1; const index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - YEff) / conv_stride_h + 1; const index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - XEff) / conv_stride_w + 1; #endif #if 0 constexpr index_t in_vector_size = 1; using in_data_t = float; using acc_data_t = float; using out_data_t = float; #elif 1 constexpr index_t in_vector_size = 1; using in_data_t = half_t; using acc_data_t = float; using out_data_t = half_t; #endif std::vector in_lengths_host(4), wei_lengths_host(4), out_lengths_host(4); switch(layout) { case ConvTensorLayout::NCHW: // NCHW in_lengths_host[0] = static_cast(N); in_lengths_host[1] = static_cast(C); in_lengths_host[2] = static_cast(Hi); in_lengths_host[3] = static_cast(Wi); wei_lengths_host[0] = static_cast(K); wei_lengths_host[1] = static_cast(C); wei_lengths_host[2] = static_cast(Y); wei_lengths_host[3] = static_cast(X); out_lengths_host[0] = static_cast(N); out_lengths_host[1] = static_cast(K); out_lengths_host[2] = static_cast(Ho); out_lengths_host[3] = static_cast(Wo); break; case ConvTensorLayout::NHWC: // NHWC in_lengths_host[0] = static_cast(N); in_lengths_host[1] = static_cast(Hi); in_lengths_host[2] = static_cast(Wi); in_lengths_host[3] = static_cast(C); wei_lengths_host[0] = static_cast(K); wei_lengths_host[1] = static_cast(Y); wei_lengths_host[2] = static_cast(X); wei_lengths_host[3] = static_cast(C); out_lengths_host[0] = static_cast(N); out_lengths_host[1] = static_cast(Ho); out_lengths_host[2] = static_cast(Wo); out_lengths_host[3] = static_cast(K); break; default: throw std::runtime_error("wrong! not implemented"); } Tensor in_host(in_lengths_host); Tensor in_device(in_lengths_host); Tensor wei(wei_lengths_host); Tensor out(out_lengths_host); std::cout << "layout: " << layout << std::endl; ostream_HostTensorDescriptor(in_host.mDesc, std::cout << "in: "); ostream_HostTensorDescriptor(wei.mDesc, std::cout << "wei: "); ostream_HostTensorDescriptor(out.mDesc, std::cout << "out: "); print_array("InLeftPads", make_tuple(in_left_pad_h, in_left_pad_w)); print_array("InRightPads", make_tuple(in_right_pad_h, in_right_pad_w)); print_array("ConvStrides", make_tuple(conv_stride_h, conv_stride_w)); print_array("ConvDilations", make_tuple(conv_dilation_h, conv_dilation_w)); std::size_t num_thread = std::thread::hardware_concurrency(); switch(init_method) { case 0: // no initialization break; case 1: out.GenerateTensorValue(GeneratorTensor_1{}, num_thread); wei.GenerateTensorValue(GeneratorTensor_1{}, num_thread); break; case 2: out.GenerateTensorValue(GeneratorTensor_1{}, num_thread); wei.GenerateTensorValue(GeneratorTensor_2{-5, 5}, num_thread); break; case 3: out.GenerateTensorValue(GeneratorTensor_2{-5, 5}, num_thread); wei.GenerateTensorValue(GeneratorTensor_1{}, num_thread); break; case 4: out.GenerateTensorValue(GeneratorTensor_2{-5, 5}, num_thread); wei.GenerateTensorValue(GeneratorTensor_2{-5, 5}, num_thread); break; case 5: out.GenerateTensorValue(GeneratorTensor_3{0.0, 1.0}, num_thread); wei.GenerateTensorValue(GeneratorTensor_3{-0.5, 0.5}, num_thread); break; default: out.GenerateTensorValue(GeneratorTensor_2{1, 5}, num_thread); auto gen_wei = [](auto... is) { return GeneratorTensor_2{1, 5}(is...) * GeneratorTensor_Checkboard{}(is...); }; wei.GenerateTensorValue(gen_wei, num_thread); } auto f_make_for_device_nchw = [&]() { #if USE_DYNAMIC_MODE const auto in_lengths_dev = make_tuple(N, C, Hi, Wi); const auto wei_lengths_dev = make_tuple(K, C, Y, X); const auto out_lengths_dev = make_tuple(N, K, Ho, Wo); const auto conv_strides_dev = make_tuple(conv_stride_h, conv_stride_w); const auto conv_dilations_dev = make_tuple(conv_dilation_h, conv_dilation_w); const auto in_left_pads_dev = make_tuple(in_left_pad_h, in_left_pad_w); const auto in_right_pads_dev = make_tuple(in_right_pad_h, in_right_pad_w); #else const auto in_lengths_dev = make_tuple(Number{}, Number{}, Number{}, Number{}); const auto wei_lengths_dev = make_tuple(Number{}, Number{}, Number{}, Number{}); const auto out_lengths_dev = make_tuple(Number{}, Number{}, Number{}, Number{}); const auto conv_strides_dev = make_tuple(Number{}, Number{}); const auto conv_dilations_dev = make_tuple(Number{}, Number{}); const auto in_left_pads_dev = make_tuple(Number{}, Number{}); const auto in_right_pads_dev = make_tuple(Number{}, Number{}); #endif return make_tuple(in_lengths_dev, wei_lengths_dev, out_lengths_dev, conv_strides_dev, conv_dilations_dev, in_left_pads_dev, in_right_pads_dev); }; auto f_make_for_device_nhwc = [&]() { #if USE_DYNAMIC_MODE const auto in_lengths_dev = make_tuple(N, Hi, Wi, C); const auto wei_lengths_dev = make_tuple(K, Y, X, C); const auto out_lengths_dev = make_tuple(N, Ho, Wo, K); const auto conv_strides_dev = make_tuple(conv_stride_h, conv_stride_w); const auto conv_dilations_dev = make_tuple(conv_dilation_h, conv_dilation_w); const auto in_left_pads_dev = make_tuple(in_left_pad_h, in_left_pad_w); const auto in_right_pads_dev = make_tuple(in_right_pad_h, in_right_pad_w); #else const auto in_lengths_dev = make_tuple(Number{}, Number{}, Number{}, Number{}); const auto wei_lengths_dev = make_tuple(Number{}, Number{}, Number{}, Number{}); const auto out_lengths_dev = make_tuple(Number{}, Number{}, Number{}, Number{}); const auto conv_strides_dev = make_tuple(Number{}, Number{}); const auto conv_dilations_dev = make_tuple(Number{}, Number{}); const auto in_left_pads_dev = make_tuple(Number{}, Number{}); const auto in_right_pads_dev = make_tuple(Number{}, Number{}); #endif return make_tuple(in_lengths_dev, wei_lengths_dev, out_lengths_dev, conv_strides_dev, conv_dilations_dev, in_left_pads_dev, in_right_pads_dev); }; const auto nhwc_desc = f_make_for_device_nhwc(); #if USE_CONV_BWD_V4R1_XDL_NHWC if(algo == ConvBackwardDataAlgo::V4R1XDLNHWC) { if(layout != ConvTensorLayout::NHWC) { throw std::runtime_error("wrong! layout"); } const auto tmp = f_make_for_device_nhwc(); device_dynamic_convolution_backward_data_implicit_gemm_v4r1_xdlops_nhwc_kyxc_nhwk< in_data_t, acc_data_t, out_data_t>(tmp[I0], tmp[I1], tmp[I2], tmp[I3], tmp[I4], tmp[I5], tmp[I6], in_device, wei, out, nrepeat); } #endif #if USE_CONV_BWD_V4R1R2_XDL_NHWC if(algo == ConvBackwardDataAlgo::V4R1R2XDLNHWC) { if(layout != ConvTensorLayout::NHWC) { throw std::runtime_error("wrong! layout"); } const auto tmp = f_make_for_device_nhwc(); device_dynamic_convolution_backward_data_implicit_gemm_v4r1r2_xdlops_nhwc_kyxc_nhwk< in_data_t, acc_data_t, out_data_t>(tmp[I0], tmp[I1], tmp[I2], tmp[I3], tmp[I4], tmp[I5], tmp[I6], in_device, wei, out, nrepeat); } #endif if(do_verification) { host_direct_convolution_backward_data(in_host, wei, out, make_tuple(conv_stride_h, conv_stride_w), make_tuple(conv_dilation_h, conv_dilation_w), make_tuple(in_left_pad_h, in_left_pad_w), make_tuple(in_right_pad_h, in_right_pad_w), layout); check_error(in_host, in_device); if(do_log) { LogRangeAsType(std::cout << "out : ", out.mData, ",") << std::endl; LogRangeAsType(std::cout << "wei: ", wei.mData, ",") << std::endl; LogRangeAsType(std::cout << "in_host : ", in_host.mData, ",") << std::endl; LogRangeAsType(std::cout << "in_device: ", in_device.mData, ",") << std::endl; } } }