// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

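// Command-line profiler entry point for the grouped convolution backward-weight
// operation; registered at the end of this file via REGISTER_PROFILER_OPERATION.
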
#include <cstdlib>
#include <initializer_list>
#include <iostream>
#include <numeric>

#include "profiler/profile_grouped_conv_bwd_weight_impl.hpp"
#include "profiler_operation_registry.hpp"

namespace {

enum struct ConvLayout
{
    GNCHW_GKCYX_GNKHW, // 0
    GNHWC_GKYXC_GNHWK, // 1
    NHWGC_GKYXC_NHWGK, // 2
};

enum struct ConvDataType
{
    F32_F32_F32,        // 0
    F16_F16_F16,        // 1
    BF16_F32_BF16,      // 2
    F16_F16_F16_BF8_F8, // 3
    I8_I8_I8            // 4
};

#define OP_NAME "grouped_conv_bwd_weight"
#define OP_DESC "Grouped Convolution Backward Weight"

static void print_helper_msg()
{
    std::cout << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
              << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
              << "                 1: Input fp16, Weight fp16, Output fp16\n"
              << "                 2: Input bf16, Weight fp32, Output bf16\n"
              << "                 3: Input fp16, Weight fp16, Output fp16, Gemm bf8@fp8\n"
              << "                 4: Input int8, Weight int8, Output int8)\n"
              << "arg3: tensor layout (0: Input[G, N, C, Hi, Wi], Weight[G, K, C, Y, X], Output[G, "
                 "N, K, Ho, Wo]\n"
              << "                     1: Input[G, N, Hi, Wi, C], Weight[G, K, Y, X, C], Output[G, "
                 "N, Ho, Wo, K]\n"
              << "                     2: Input[N, Hi, Wi, G, C], Weight[G, K, Y, X, C], Output[N, "
                 "Ho, Wo, G, K]\n"
              << "arg4: verification (0: no, 1: yes)\n"
              << "arg5: initialization (0: no init, 1: integer value, 2: decimal value)\n"
              << "arg6: print tensor value (0: no; 1: yes)\n"
              << "arg7: time kernel (0: no, 1: yes)\n"
              << ck::utils::conv::get_conv_param_parser_helper_msg() << " SplitK\n"
              << std::endl;
}

} // namespace

int profile_grouped_conv_bwd_weight(int argc, char* argv[])
{
    // 8 for control, 1 for num_dim_spatial
    if(argc < 9)
    {
        print_helper_msg();
        return 1;
    }

    const auto data_type       = static_cast<ConvDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<ConvLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);
    const int num_dim_spatial  = std::stoi(argv[8]);

    // 8 for control, 1 for num_dim_spatial, 4 for G/N/K/C, 6 * num_dim_spatial for
    // the spatial conv params, 1 for split-K
    if(argc != 8 + 1 + 4 + 6 * num_dim_spatial + 1)
    {
        print_helper_msg();
        return 1;
    }

    const auto params = ck::utils::conv::parse_conv_param(num_dim_spatial, 9, argv);

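    // SplitK is the last command-line argument; it is clamped to a minimum of 1.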
    ck::index_t split_k = std::stoi(argv[8 + 1 + 4 + 6 * num_dim_spatial]);
    split_k             = std::max(1, split_k);

    using F32  = float;
    using F16  = ck::half_t;
    using BF16 = ck::bhalf_t;
    using F8   = ck::f8_t;
    using BF8  = ck::bf8_t;

    using namespace ck::tensor_layout::convolution;

    constexpr auto I1 = ck::Number<1>{};
    constexpr auto I2 = ck::Number<2>{};
    constexpr auto I3 = ck::Number<3>{};

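    // Dispatch helper: instantiates and runs the templated profiler implementation
    // for the selected spatial dimension, tensor layouts, data types and compute types.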
    auto profile = [&](auto num_dim_spatial_tmp,
                       auto in_layout,
                       auto wei_layout,
                       auto out_layout,
                       auto in_type,
                       auto wei_type,
                       auto out_type,
                       auto compute_type_a,
                       auto compute_type_b) {
        constexpr ck::index_t NDimSpatial = num_dim_spatial_tmp.value;

        using InLayout  = decltype(in_layout);
        using WeiLayout = decltype(wei_layout);
        using OutLayout = decltype(out_layout);

        using InDataType  = decltype(in_type);
        using WeiDataType = decltype(wei_type);
        using OutDataType = decltype(out_type);

        using ComputeTypeA = decltype(compute_type_a);
        using ComputeTypeB = decltype(compute_type_b);

        bool pass = ck::profiler::profile_grouped_conv_bwd_weight_impl<NDimSpatial,
                                                                       InLayout,
                                                                       WeiLayout,
                                                                       OutLayout,
                                                                       InDataType,
                                                                       WeiDataType,
                                                                       OutDataType,
                                                                       ComputeTypeA,
                                                                       ComputeTypeB>(
            do_verification, init_method, do_log, time_kernel, params, split_k);

        return pass ? 0 : 1;
    };

    if(num_dim_spatial == 1 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        if(data_type == ConvDataType::BF16_F32_BF16)
        {
            // fp32 atomic add is used for weight tensor in bf16 kernel
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, BF16{}, F32{}, BF16{}, BF16{}, BF16{});
        }
    }
    if(num_dim_spatial == 2 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        if(data_type == ConvDataType::BF16_F32_BF16)
        {
            // fp32 atomic add is used for weight tensor in bf16 kernel
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, BF16{}, F32{}, BF16{}, BF16{}, BF16{});
        }
    }
    if(num_dim_spatial == 2 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        if(data_type == ConvDataType::BF16_F32_BF16)
        {
            // fp32 atomic add is used for weight tensor in bf16 kernel
            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, BF16{}, F32{}, BF16{}, BF16{}, BF16{});
        }
    }
    if(num_dim_spatial == 3 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        if(data_type == ConvDataType::BF16_F32_BF16)
        {
            // fp32 atomic add is used for weight tensor in bf16 kernel
            return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, BF16{}, F32{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::I8_I8_I8)
        {
            return profile(
                I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, int8_t{}, int8_t{}, int8_t{}, int8_t{}, int8_t{});
        }
    }
    if(num_dim_spatial == 3 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        if(data_type == ConvDataType::BF16_F32_BF16)
        {
            // fp32 atomic add is used for weight tensor in bf16 kernel
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF16{}, F32{}, BF16{}, BF16{}, BF16{});
        }
        if(data_type == ConvDataType::F16_F16_F16_BF8_F8)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F16{}, F16{}, F16{}, BF8{}, F8{});
        }
        else if(data_type == ConvDataType::I8_I8_I8)
        {
            return profile(
                I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, int8_t{}, int8_t{}, int8_t{}, int8_t{}, int8_t{});
        }
    }

    std::cout << "this data_type & layout is not implemented" << std::endl;

    return 1;
}

REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_grouped_conv_bwd_weight);