// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_conv_bwd_data.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/tensor_operation_instance/gpu/convolution_backward_data.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/convolution_parameter.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_conv_bwd_data.hpp"

namespace ck {
namespace profiler {

// debug helper: print the contents of a 4-D tensor to stdout; the indexing assumes the
// descriptor lengths are ordered (N, C, Hi, Wi)
template <typename DataType>
void show_data_nhwc_layout(Tensor<DataType>& nhwc)
{
    std::cout << "[";
    for(int n = 0; n < ck::type_convert<int>(nhwc.mDesc.GetLengths()[0]); n++)
    {
        std::cout << "[";
        for(int hi = 0; hi < ck::type_convert<int>(nhwc.mDesc.GetLengths()[2]); hi++)
        {
            std::cout << "[";
            for(int wi = 0; wi < ck::type_convert<int>(nhwc.mDesc.GetLengths()[3]); wi++)
            {
                std::cout << "[";
                for(int c = 0; c < ck::type_convert<int>(nhwc.mDesc.GetLengths()[1]); c++)
                {
                    std::cout << static_cast<float>(nhwc(n, c, hi, wi)) << "  ";
                }
                std::cout << "]";
            }
            std::cout << "]";
        }
        std::cout << "]";
    }
    std::cout << "]";
}

template <ck::index_t NDimSpatial,
          typename InLayout,
          typename WeiLayout,
          typename OutLayout,
          typename InDataType,
          typename WeiDataType,
          typename OutDataType>
bool profile_conv_bwd_data_impl(int do_verification,
                                int init_method,
                                bool do_log,
                                bool time_kernel,
                                const ck::tensor_operation::device::ConvParams& params)
{
    using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
    using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
    using OutElementOp = ck::tensor_operation::element_wise::PassThrough;

    const auto in_element_op  = InElementOp{};
    const auto wei_element_op = WeiElementOp{};
    const auto out_element_op = OutElementOp{};

    // make host tensor descriptor
    auto f_nhwc_host_tensor_descriptor =
        [](ck::index_t n, ck::index_t c, std::vector<ck::index_t> spatial_lengths) {
            std::vector<std::size_t> nhwc_lengths{static_cast<std::size_t>(n),
                                                  static_cast<std::size_t>(c)};
            nhwc_lengths.insert(
                nhwc_lengths.begin() + 1, spatial_lengths.begin(), spatial_lengths.end());

            return HostTensorDescriptor(nhwc_lengths);
        };

    auto f_nchw_host_tensor_descriptor =
        [](ck::index_t n, ck::index_t c, std::vector<ck::index_t> spatial_lengths) {
            std::vector<std::size_t> nchw_lengths{static_cast<std::size_t>(n),
                                                  static_cast<std::size_t>(c)};
            nchw_lengths.insert(nchw_lengths.end(), spatial_lengths.begin(), spatial_lengths.end());

            return HostTensorDescriptor(nchw_lengths);
        };
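
    // for illustration: with n = 2, c = 8, spatial_lengths = {3, 3},
    //   f_nhwc_host_tensor_descriptor -> lengths {2, 3, 3, 8} (channels-last)
    //   f_nchw_host_tensor_descriptor -> lengths {2, 8, 3, 3} (channels-first)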

    HostTensorDescriptor in_desc, wei_desc, out_desc;

    // FIXME: properly implement "make host descriptor" for different layout
    if constexpr(is_same_v<InLayout, ck::tensor_layout::convolution::NWC> ||
                 is_same_v<InLayout, ck::tensor_layout::convolution::NHWC> ||
                 is_same_v<InLayout, ck::tensor_layout::convolution::NDHWC>)
    {
        in_desc =
            f_nhwc_host_tensor_descriptor(params.N_, params.C_, params.input_spatial_lengths_);
    }
    else if constexpr(is_same_v<InLayout, ck::tensor_layout::convolution::NCW> ||
                      is_same_v<InLayout, ck::tensor_layout::convolution::NCHW> ||
                      is_same_v<InLayout, ck::tensor_layout::convolution::NCDHW>)
    {
        in_desc =
            f_nchw_host_tensor_descriptor(params.N_, params.C_, params.input_spatial_lengths_);
    }

    // FIXME: properly implement "make host descriptor" for different layout
    if constexpr(is_same_v<WeiLayout, ck::tensor_layout::convolution::KXC> ||
                 is_same_v<WeiLayout, ck::tensor_layout::convolution::KYXC> ||
                 is_same_v<WeiLayout, ck::tensor_layout::convolution::KZYXC>)
    {
        wei_desc =
            f_nhwc_host_tensor_descriptor(params.K_, params.C_, params.filter_spatial_lengths_);
    }
    else if constexpr(is_same_v<WeiLayout, ck::tensor_layout::convolution::KCX> ||
                      is_same_v<WeiLayout, ck::tensor_layout::convolution::KCYX> ||
                      is_same_v<WeiLayout, ck::tensor_layout::convolution::KCZYX>)
    {
        wei_desc =
            f_nchw_host_tensor_descriptor(params.K_, params.C_, params.filter_spatial_lengths_);
    }

    // FIXME: properly implement "make host descriptor" for different layout
    if constexpr(is_same_v<OutLayout, ck::tensor_layout::convolution::NWK> ||
                 is_same_v<OutLayout, ck::tensor_layout::convolution::NHWK> ||
                 is_same_v<OutLayout, ck::tensor_layout::convolution::NDHWK>)
    {
        out_desc =
            f_nhwc_host_tensor_descriptor(params.N_, params.K_, params.GetOutputSpatialLengths());
    }
    else if constexpr(is_same_v<OutLayout, ck::tensor_layout::convolution::NKW> ||
                      is_same_v<OutLayout, ck::tensor_layout::convolution::NKHW> ||
                      is_same_v<OutLayout, ck::tensor_layout::convolution::NKDHW>)
    {
        out_desc =
            f_nchw_host_tensor_descriptor(params.N_, params.K_, params.GetOutputSpatialLengths());
    }

    // for backward data, the output gradient (dy) and the weights are the kernel's inputs,
    // and the input gradient (dx) is what gets computed; dx is produced both by the host
    // reference and by the device for comparison
    Tensor<InDataType> input_host_result(in_desc);
    Tensor<InDataType> input_device_result(in_desc);
    Tensor<WeiDataType> weight(wei_desc);
    Tensor<OutDataType> output(out_desc);

    std::cout << "input: " << input_host_result.mDesc << std::endl;
    std::cout << "weight: " << weight.mDesc << std::endl;
    std::cout << "output: " << output.mDesc << std::endl;

    // init_method: 0 = skip initialization, 1 = random values in [-5, 5],
    // otherwise random values in [0, 1] for output and [-0.5, 0.5] for weight
    switch(init_method)
    {
    case 0: break;
    case 1:
        output.GenerateTensorValue(GeneratorTensor_2<OutDataType>{-5, 5});
        weight.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
        break;
    default:
        output.GenerateTensorValue(GeneratorTensor_3<OutDataType>{0.0, 1.0});
        weight.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * input_device_result.mDesc.GetElementSpace());
    DeviceMem wei_device_buf(sizeof(WeiDataType) * weight.mDesc.GetElementSpace());
    DeviceMem out_device_buf(sizeof(OutDataType) * output.mDesc.GetElementSpace());

    out_device_buf.ToDevice(output.mData.data());
    wei_device_buf.ToDevice(weight.mData.data());

    if(do_verification)
    {
        auto ref_conv = ck::tensor_operation::host::ReferenceConvBwdData<NDimSpatial,
                                                                         InLayout,
                                                                         WeiLayout,
                                                                         OutLayout,
                                                                         InDataType,
                                                                         WeiDataType,
                                                                         OutDataType,
                                                                         InElementOp,
                                                                         WeiElementOp,
                                                                         OutElementOp>{};

        auto ref_invoker = ref_conv.MakeInvoker();

        auto ref_argument = ref_conv.MakeArgument(input_host_result,
                                                  weight,
                                                  output,
                                                  params.conv_filter_strides_,
                                                  params.conv_filter_dilations_,
                                                  params.input_left_pads_,
                                                  params.input_right_pads_,
                                                  InElementOp{},
                                                  WeiElementOp{},
                                                  OutElementOp{});
        ref_invoker.Run(ref_argument);
    }

    using DeviceOp = ck::tensor_operation::device::DeviceConvBwdData<NDimSpatial,
                                                                     InLayout,
                                                                     WeiLayout,
                                                                     OutLayout,
                                                                     InDataType,
                                                                     WeiDataType,
                                                                     OutDataType,
                                                                     InElementOp,
                                                                     WeiElementOp,
                                                                     OutElementOp>;

    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    std::string best_op_name;
    float best_avg_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    // profile device Conv instances
    bool pass = true;

    for(auto& op_ptr : op_ptrs)
    {
        auto argument_ptr =
            op_ptr->MakeArgumentPointer(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
                                        static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
                                        static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
                                        params.N_,
                                        params.K_,
                                        params.C_,
                                        params.input_spatial_lengths_,
                                        params.filter_spatial_lengths_,
                                        params.output_spatial_lengths_,
                                        params.conv_filter_strides_,
                                        params.conv_filter_dilations_,
                                        params.input_left_pads_,
                                        params.input_right_pads_,
                                        in_element_op,
                                        wei_element_op,
                                        out_element_op);

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            // for conv bwd data, some elements of the input-gradient tensor are zero and are
            // never written by the kernel, so the buffer must be zero-initialized first
            in_device_buf.SetZero();

            std::string op_name = op_ptr->GetTypeString();

            auto invoker_ptr = op_ptr->MakeInvokerPointer();

            float avg_time =
                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

            std::size_t flop      = params.GetFlops();
            std::size_t num_btype = params.GetByte<InDataType, WeiDataType, OutDataType>();

            // avg_time is in ms: flop / 1e9 gives GFLOP, and GFLOP / ms = TFLOP/s;
            // likewise bytes / 1e6 gives MB, and MB / ms = GB/s
            float tflops     = static_cast<float>(flop) / 1.E9 / avg_time;
            float gb_per_sec = num_btype / 1.E6 / avg_time;

            std::cout << "Perf: " << avg_time << " ms, " << tflops << " TFlops, " << gb_per_sec
                      << " GB/s" << std::endl;

            if(tflops > best_tflops)
            {
                best_op_name    = op_name;
                best_tflops     = tflops;
                best_avg_time   = avg_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
                in_device_buf.FromDevice(input_device_result.mData.data());

                pass =
                    pass & ck::utils::check_err(input_device_result.mData, input_host_result.mData);

                if(do_log)
                {
                    // labels follow the kernel's data flow: the output gradient (dy) is the
                    // kernel's input, and the input gradient (dx) is its output
                    std::cout << "in : ";
                    show_data_nhwc_layout(output);
                    std::cout << std::endl;

                    std::cout << "wei: ";
                    show_data_nhwc_layout(weight);
                    std::cout << std::endl;

                    std::cout << "out_host  : ";
                    show_data_nhwc_layout(input_host_result);
                    std::cout << std::endl;

                    std::cout << "out_device: ";
                    show_data_nhwc_layout(input_device_result);
                    std::cout << std::endl;
                }
            }
        }
        else
        {
            std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
        }
    }

    std::cout << "Best configuration parameters:"
              << "\nname: " << best_op_name << "\navg_time: " << best_avg_time
              << "\ntflops: " << best_tflops << "\nGB/s: " << best_gb_per_sec << std::endl;

    return pass;
}

} // namespace profiler
} // namespace ck
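
// ---------------------------------------------------------------------------------------
// Illustrative usage (a minimal sketch, not part of this header): how a profiler driver
// might instantiate profile_conv_bwd_data_impl for a 2-D fp16 problem in NHWC / KYXC /
// NHWK layouts. The wrapper name is hypothetical, and the ConvParams object is assumed to
// be filled in elsewhere (e.g. parsed from the command line), since its construction is
// not shown in this file.
//
// bool run_conv2d_bwd_data_fp16(const ck::tensor_operation::device::ConvParams& params)
// {
//     return ck::profiler::profile_conv_bwd_data_impl<2,
//                                                     ck::tensor_layout::convolution::NHWC,
//                                                     ck::tensor_layout::convolution::KYXC,
//                                                     ck::tensor_layout::convolution::NHWK,
//                                                     ck::half_t,
//                                                     ck::half_t,
//                                                     ck::half_t>(
//         1,     // do_verification: compare against the CPU reference
//         1,     // init_method: random values in [-5, 5]
//         false, // do_log: skip tensor dumps
//         true,  // time_kernel: report kernel time
//         params);
// }
// ---------------------------------------------------------------------------------------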