"tests/pipelines/wan/test_wan_animate.py" did not exist on "2d8a41cae8635d366a394d42fbabfdcb21a16f7d"
profile_grouped_conv_fwd.cpp 11.1 KB
Newer Older
1
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "profiler/profile_grouped_conv_fwd_impl.hpp"
#include "profiler_operation_registry.hpp"

namespace {

enum struct ConvLayout
{
    GNHWC_GKYXC_GNHWK, // 0
    NHWGC_GKYXC_NHWGK, // 1
};

enum struct ConvDataType
{
    F32_F32_F32,    // 0
    F16_F16_F16,    // 1
    BF16_BF16_BF16, // 2
    INT8_INT8_INT8, // 3
    F8_F8_F8,       // 4
    BF8_BF8_F8,     // 5
    F8_BF8_F8,      // 6
    BF8_F8_F8,      // 7
};

#define OP_NAME "grouped_conv_fwd"
#define OP_DESC "Grouped Convolution Forward"

static void print_helper_msg()
{
    std::cout
        // clang-format off
        << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
        << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
        << "                 1: Input fp16, Weight fp16, Output fp16\n"
        << "                 2: Input bf16, Weight bf16, Output bf16\n"
        << "                 3: Input int8, Weight int8, Output int8\n"
        << "                 4: Input fp8, Weight fp8, Output fp8\n"
        << "                 5: Input bf8, Weight bf8, Output fp8\n"
        << "                 6: Input fp8, Weight bf8, Output fp8\n"
        << "                 7: Input bf8, Weight fp8, Output fp8)\n"
        << "arg3: tensor layout (0: Input[G, N, Hi, Wi, C], Weight[G, K, Y, X, C], Output[G, N, Ho, Wo, K]\n"
        << "                     1: Input[N, Hi, Wi, G, C], Weight[G, K, Y, X, C], Output[N, Ho, Wo, G, K])\n"
        << "arg4: verification (0: no, 1: yes)\n"
        << "arg5: initialization (0: no init, 1: integer value, 2: decimal value)\n"
        << "arg6: print tensor value (0: no; 1: yes)\n"
        << "arg7: time kernel (0: no, 1: yes)\n"
        << ck::utils::conv::get_conv_param_parser_helper_msg() << std::endl;
    // clang-format on
}
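
// A hypothetical example invocation, assuming the usual ckProfiler driver
// binary; the authoritative per-dimension parameter order is whatever
// ck::utils::conv::get_conv_param_parser_helper_msg() prints, so the
// placeholders below are illustrative only. Profiling a 2D fp16 NHWGC
// convolution with verification, integer init, no tensor printing, and
// kernel timing might look like:
//
//   ckProfiler grouped_conv_fwd 1 1 1 1 0 1 2 <G> <N> <K> <C> \
//       <6 * 2 = 12 per-dimension conv parameters>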

} // namespace

int profile_grouped_conv_fwd(int argc, char* argv[])
{
    // 8 for control, 1 for num_dim_spatial
    if(argc < 9)
    {
        print_helper_msg();
        return 1;
    }

    const auto data_type       = static_cast<ConvDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<ConvLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);
    const int num_dim_spatial  = std::stoi(argv[8]);

    // 8 for control, 1 for num_dim_spatial, 4 for G/N/K/C, and 6 * num_dim_spatial
    if(argc != 8 + 1 + 4 + 6 * num_dim_spatial)
    {
        print_helper_msg();
        return 1;
    }
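    // Worked example of the count above: for num_dim_spatial == 2 this
    // requires argc == 8 + 1 + 4 + 6 * 2 == 25, i.e. argv[0..7] hold the
    // program name, the operation name, and the six control flags;
    // argv[8] holds num_dim_spatial; argv[9..12] hold G/N/K/C; and the
    // remaining twelve entries hold the per-dimension convolution
    // parameters parsed below.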

    const auto params = ck::utils::conv::parse_conv_param(num_dim_spatial, 9, argv);

    using F32  = float;
    using F16  = ck::half_t;
    using BF16 = ck::bhalf_t;
    using INT8 = int8_t;
    using F8   = ck::f8_t;
    using BF8  = ck::bf8_t;

    // "G-outermost" layouts: GN[D/H/]WC input, GK[Z/Y/]XC weight, GN[D/H/]WK output
    using GNWC   = ck::tensor_layout::convolution::GNWC;
    using GNHWC  = ck::tensor_layout::convolution::GNHWC;
    using GNDHWC = ck::tensor_layout::convolution::GNDHWC;

    using GKXC   = ck::tensor_layout::convolution::GKXC;
    using GKYXC  = ck::tensor_layout::convolution::GKYXC;
    using GKZYXC = ck::tensor_layout::convolution::GKZYXC;

    using GNWK   = ck::tensor_layout::convolution::GNWK;
    using GNHWK  = ck::tensor_layout::convolution::GNHWK;
    using GNDHWK = ck::tensor_layout::convolution::GNDHWK;

    // "G-interleaved" layouts: N[D/H/]WGC input, GK[Z/Y/]XC weight, N[D/H/]WGK output
    using NWGC   = ck::tensor_layout::convolution::NWGC;
    using NHWGC  = ck::tensor_layout::convolution::NHWGC;
    using NDHWGC = ck::tensor_layout::convolution::NDHWGC;

    using NWGK   = ck::tensor_layout::convolution::NWGK;
    using NHWGK  = ck::tensor_layout::convolution::NHWGK;
    using NDHWGK = ck::tensor_layout::convolution::NDHWGK;

    constexpr auto I1 = ck::Number<1>{};
    constexpr auto I2 = ck::Number<2>{};
    constexpr auto I3 = ck::Number<3>{};

    auto profile = [&](auto num_dim_spatial_tmp,
                       auto in_layout,
                       auto wei_layout,
                       auto out_layout,
                       auto in_type,
                       auto wei_type,
                       auto out_type,
                       auto a_compute_type,
                       auto b_compute_type) {
        constexpr ck::index_t NDimSpatial = num_dim_spatial_tmp.value;

        using InLayout  = decltype(in_layout);
        using WeiLayout = decltype(wei_layout);
        using OutLayout = decltype(out_layout);

        using InDataType  = decltype(in_type);
        using WeiDataType = decltype(wei_type);
        using OutDataType = decltype(out_type);

        using AComputeType = decltype(a_compute_type);
        using BComputeType = decltype(b_compute_type);

        bool pass = ck::profiler::profile_grouped_conv_fwd_impl<NDimSpatial,
                                                                InLayout,
                                                                WeiLayout,
                                                                OutLayout,
                                                                InDataType,
                                                                WeiDataType,
                                                                OutDataType,
                                                                AComputeType,
                                                                BComputeType>(
            do_verification, init_method, do_log, time_kernel, params);

        return pass ? 0 : 1;
    };
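
    // Dispatch: map the runtime (num_dim_spatial, layout, data_type) selection
    // onto one compile-time instantiation. Note that the fp8/bf8 data types
    // (cases 4-7) are only wired up for the 3D NDHWGC/GKZYXC/NDHWGK path;
    // every unhandled combination falls through to the message at the end.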

    // GNHWC_GKYXC_GNHWK
    if(num_dim_spatial == 1 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 2 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 3 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(
                I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(
                I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
    }
    // NHWGC_GKYXC_NHWGK
    else if(num_dim_spatial == 1 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I1, NWGC{}, GKXC{}, NWGK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I1, NWGC{}, GKXC{}, NWGK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I1, NWGC{}, GKXC{}, NWGK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(I1, NWGC{}, GKXC{}, NWGK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 2 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 3 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F32{}, F32{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F16{}, F16{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(
                I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF16{}, BF16{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(
                I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, INT8{}, INT8{}, INT8{}, INT8{}, INT8{});
        }
        else if(data_type == ConvDataType::F8_F8_F8)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, F8{}, F8{}, F8{}, F8{});
        }
        else if(data_type == ConvDataType::BF8_BF8_F8)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF8{}, BF8{}, F8{}, BF8{}, BF8{});
        }
        else if(data_type == ConvDataType::F8_BF8_F8)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F8{}, BF8{}, F8{}, F8{}, BF8{});
        }
        else if(data_type == ConvDataType::BF8_F8_F8)
        {
            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF8{}, F8{}, F8{}, BF8{}, F8{});
        }
    }

    std::cout << "this data_type & layout is not implemented" << std::endl;

    return 1;
}

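// Register this entry point with the profiler's operation registry so that the
// profiler driver (presumably the ckProfiler binary) can look up and dispatch
// "grouped_conv_fwd" requests to profile_grouped_conv_fwd above.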
REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_grouped_conv_fwd);