// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/ck.hpp"
#include <iomanip>
#include <iostream>
#include <typeinfo>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_conv_fwd.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/tensor_operation_instance/gpu/convolution_backward_weight.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/convolution_parameter.hpp"
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_conv_bwd_weight.hpp"

namespace ck {
namespace profiler {

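// Debug helper: prints every element of a tensor, reading descriptor lengths [0..3]
// as (N, C, H, W) and traversing in N/H/W/C order; values are cast to float for
// printing. The host tensors below carry a leading group dimension, so this is only
// a rough dump intended for small problems.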
template <typename DataType>
void show_data_nhwc_layout(Tensor<DataType>& nhwc)
{
    std::cout << "[";
    for(int n = 0; n < ck::type_convert<int>(nhwc.mDesc.GetLengths()[0]); n++)
    {
        std::cout << "[";
        for(int hi = 0; hi < ck::type_convert<int>(nhwc.mDesc.GetLengths()[2]); hi++)
        {
            std::cout << "[";
            for(int wi = 0; wi < ck::type_convert<int>(nhwc.mDesc.GetLengths()[3]); wi++)
            {
                std::cout << "[";
                for(int c = 0; c < ck::type_convert<int>(nhwc.mDesc.GetLengths()[1]); c++)
                {
                    std::cout << static_cast<float>(nhwc(n, c, hi, wi)) << "  ";
                }
                std::cout << "]";
            }
            std::cout << "]";
        }
        std::cout << "]";
    }
    std::cout << "]";
}

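// Profiles all registered DeviceConvBwdWeight instances for one convolution problem.
//   do_verification - compare each instance's weight gradient against a CPU reference
//   init_method     - 0: leave tensors uninitialized, 1: small integers, else: random floats
//   do_log          - dump tensor contents during verification
//   time_kernel     - measure kernel time (forwarded via StreamConfig)
//   conv_param      - convolution problem description (sizes, strides, dilations, pads)
//   split_k         - split-K factor forwarded to every instance
// Returns false if any supported instance fails verification.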
template <ck::index_t NDimSpatial,
          typename InLayout,
          typename WeiLayout,
          typename OutLayout,
          typename InDataType,
          typename WeiDataType,
          typename OutDataType>
bool profile_conv_bwd_weight_impl(int do_verification,
                                  int init_method,
                                  bool do_log,
                                  bool time_kernel,
                                  const ck::utils::conv::ConvParam& conv_param,
                                  ck::index_t split_k)
{
    using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
    using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
    using OutElementOp = ck::tensor_operation::element_wise::PassThrough;

    const auto in_element_op  = InElementOp{};
    const auto wei_element_op = WeiElementOp{};
    const auto out_element_op = OutElementOp{};

    const auto in_g_n_c_wis_desc =
        ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(conv_param);

    const auto wei_g_k_c_xs_desc =
        ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(conv_param);

    const auto out_g_n_k_wos_desc =
        ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(conv_param);

    Tensor<InDataType> input(in_g_n_c_wis_desc);
    Tensor<WeiDataType> weight_host_result(wei_g_k_c_xs_desc);
    Tensor<WeiDataType> weight_device_result(wei_g_k_c_xs_desc);
    Tensor<OutDataType> output(out_g_n_k_wos_desc);

    std::cout << "input: " << input.mDesc << std::endl;
    std::cout << "weight: " << weight_host_result.mDesc << std::endl;
    std::cout << "output: " << output.mDesc << std::endl;

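    // init_method: 0 = leave tensors uninitialized, 1 = small integer values,
    // anything else = random floating-point values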
    switch(init_method)
    {
    case 0: break;
    case 1:
        input.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
        output.GenerateTensorValue(GeneratorTensor_2<OutDataType>{-5, 5});
        break;
    default:
        input.GenerateTensorValue(GeneratorTensor_3<InDataType>{0.0, 1.0});
        output.GenerateTensorValue(GeneratorTensor_3<OutDataType>{-0.5, 0.5});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpaceSize());
    DeviceMem wei_device_buf(sizeof(WeiDataType) *
                             weight_device_result.mDesc.GetElementSpaceSize());
    DeviceMem out_device_buf(sizeof(OutDataType) * output.mDesc.GetElementSpaceSize());

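    // the weight gradient is produced by the kernels, so only the input and output
    // tensors are copied to the device; the weight buffer is zeroed per instance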
    in_device_buf.ToDevice(input.mData.data());
    out_device_buf.ToDevice(output.mData.data());

    if(do_verification)
    {
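        // compute the reference weight gradient once on the CPU; every device result
        // is later compared against weight_host_result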
        auto ref_conv = ck::tensor_operation::host::ReferenceConvBwdWeight<NDimSpatial,
                                                                           InDataType,
                                                                           WeiDataType,
                                                                           OutDataType,
                                                                           InElementOp,
                                                                           WeiElementOp,
                                                                           OutElementOp>{};

        auto ref_invoker = ref_conv.MakeInvoker();

        auto ref_argument = ref_conv.MakeArgument(input,
                                                  weight_host_result,
                                                  output,
                                                  conv_param.conv_filter_strides_,
                                                  conv_param.conv_filter_dilations_,
                                                  conv_param.input_left_pads_,
                                                  conv_param.input_right_pads_,
                                                  in_element_op,
                                                  wei_element_op,
                                                  out_element_op);

        ref_invoker.Run(ref_argument);
    }

    using DeviceOp = ck::tensor_operation::device::DeviceConvBwdWeight<NDimSpatial,
                                                                       InLayout,
                                                                       WeiLayout,
                                                                       OutLayout,
                                                                       InDataType,
                                                                       WeiDataType,
                                                                       OutDataType,
                                                                       InElementOp,
                                                                       WeiElementOp,
                                                                       OutElementOp>;

    // get device op instances
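    // (one type-erased pointer per kernel instance registered for this combination of
    // layouts and data types)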
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    std::string best_op_name;
    float best_avg_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    // profile device Conv instances
    bool all_pass = true;

    for(auto& op_ptr : op_ptrs)
    {
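        // build a type-erased argument describing this problem; whether the instance
        // can actually handle it is checked with IsSupportedArgument below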
        auto argument_ptr =
            op_ptr->MakeArgumentPointer(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
                                        static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
                                        static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
                                        conv_param.N_,
                                        conv_param.K_,
                                        conv_param.C_,
                                        conv_param.input_spatial_lengths_,
                                        conv_param.filter_spatial_lengths_,
                                        conv_param.output_spatial_lengths_,
                                        conv_param.conv_filter_strides_,
                                        conv_param.conv_filter_dilations_,
                                        conv_param.input_left_pads_,
                                        conv_param.input_right_pads_,
                                        in_element_op,
                                        wei_element_op,
                                        out_element_op,
                                        split_k);

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            // the kernel accumulates the weight gradient with atomic adds, so the
            // output (weight) buffer must be reset before each run
            wei_device_buf.SetZero();

            std::string op_name = op_ptr->GetTypeString();

            auto invoker_ptr = op_ptr->MakeInvokerPointer();

            float avg_time =
                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

            std::size_t flop      = conv_param.GetFlops();
            std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();

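            // avg_time is in ms: GFLOP / ms == TFLOPS and MB / ms == GB/s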
            float tflops     = static_cast<float>(flop) / 1.E9 / avg_time;
            float gb_per_sec = num_btype / 1.E6 / avg_time;

            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, "
                      << gb_per_sec << " GB/s, " << op_name << std::endl;

            if(tflops > best_tflops)
            {
                best_op_name    = op_name;
                best_tflops     = tflops;
                best_avg_time   = avg_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
                wei_device_buf.FromDevice(weight_device_result.mData.data());

                bool pass =
                    ck::utils::check_err(weight_host_result.mData, weight_device_result.mData);

                if(!pass)
                {
                    std::cout << "Fail info:" << op_ptr->GetTypeString() << std::endl;
                }

                all_pass &= pass;

                if(do_log)
                {
                    std::cout << "in : ";
                    show_data_nhwc_layout(output);
                    std::cout << std::endl;

                    std::cout << "wei: ";
                    show_data_nhwc_layout(weight_host_result);
                    std::cout << std::endl;

                    std::cout << "out  : ";
                    show_data_nhwc_layout(input);
                    std::cout << std::endl;

                    std::cout << "wei_device: ";
                    show_data_nhwc_layout(weight_device_result);
                    std::cout << std::endl;
                }
            }
        }
        else
        {
            std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
        }
    }

    std::cout << "Best configuration parameters:"
              << "\nname: " << best_op_name << "\navg_time: " << best_avg_time
              << "\ntflops: " << best_tflops << "\nGB/s: " << best_gb_per_sec << std::endl;

    return all_pass;
}

} // namespace profiler
} // namespace ck
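
// Example driver call (a hypothetical sketch, not part of this header): profiling 2-D
// fp32 backward-weight with NHWC / KYXC / NHWK layouts. The layout tags and the exact
// ConvParam constructor arguments are assumptions; see
// ck/tensor_operation/gpu/device/tensor_layout.hpp and
// ck/library/utility/convolution_parameter.hpp for the authoritative definitions.
//
//   namespace ctl = ck::tensor_layout::convolution;
//   ck::utils::conv::ConvParam conv_param{
//       2, 1, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
//   bool pass = ck::profiler::profile_conv_bwd_weight_impl<2,
//                                                          ctl::NHWC,
//                                                          ctl::KYXC,
//                                                          ctl::NHWK,
//                                                          float,
//                                                          float,
//                                                          float>(
//       1,     // do_verification
//       1,     // init_method
//       false, // do_log
//       true,  // time_kernel
//       conv_param,
//       1);    // split_k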