"vscode:/vscode.git/clone" did not exist on "bb3cce9a93d6c1d2a6f504afedec988e014587bf"
profile_gemm_bias_2d.cpp 9.6 KB
Newer Older
1
2
3
4
5
6
7
8
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdio>
#include <cstdlib>
#include <stdlib.h>
#include <string>
#include <stdexcept>
#include <half.hpp>
#include "profile_gemm_bias_2d_impl.hpp"

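// Memory layout of the A, B, and C matrices. The name lists the dimension order of each operand,
// e.g. MK_KN_MN means A is M x K (row-major), B is K x N (row-major), and C is M x N (row-major);
// KM and NK denote column-major A and B. Only the *_MN layouts are dispatched below.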
enum struct GemmMatrixLayout
{
    MK_KN_MN, // 0
    MK_NK_MN, // 1
    KM_KN_MN, // 2
    KM_NK_MN, // 3
    MK_KN_NM, // 4
    MK_NK_NM, // 5
    KM_KN_NM, // 6
    KM_NK_NM, // 7
};

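// Data types of the A, B, and C matrices (each enumerator uses the same precision for all three).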
enum struct GemmDataType
{
    F32_F32_F32, // 0
    F16_F16_F16, // 1
};

int profile_gemm_bias_2d(int argc, char* argv[])
{
    if(!(argc == 16 || argc == 17))
    {
        printf("arg1: tensor operation (gemm: GEMM+Bias_2d)\n");
        printf("arg2: data type (0: fp32; 1: fp16)\n");
        printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
        printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
        printf("                     2: A[k, m] * B[k, n] = C[m, n];\n");
        printf("                     3: A[k, m] * B[n, k] = C[m, n])\n");
        printf("arg4: verification (0: no; 1: yes)\n");
        printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg6: print tensor value (0: no; 1: yes)\n");
        printf("arg7: time kernel (0=n0, 1=yes)\n");
        printf("arg8 to 13: M, N, K, StrideA, StrideB, StrideC\n");
        printf("arg14: alpha\n");
        printf("arg15: beta\n");
        printf("arg16: split k into  mulitiple batch\n");
        exit(1);
    }
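    // Illustrative invocation (profiler executable and operation names are placeholders, argument
    // values are examples only):
    //   <profiler> <gemm_bias_2d> 1 0 1 1 0 1 1024 1024 1024 -1 -1 -1 1.0 1.0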

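    // Parse the command-line arguments; positions follow the usage message above.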
    const auto data_type       = static_cast<GemmDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<GemmMatrixLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);

    const int M = std::stoi(argv[8]);
    const int N = std::stoi(argv[9]);
    const int K = std::stoi(argv[10]);

    const int StrideA = std::stoi(argv[11]);
    const int StrideB = std::stoi(argv[12]);
    const int StrideC = std::stoi(argv[13]);

    const float alpha = std::stof(argv[14]);
    const float beta  = std::stof(argv[15]);

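    // Dispatch to the templated profiler implementation that matches the requested data type and
    // layout (RowMajor/ColumnMajor follow the MK/KM, KN/NK, and MN codes). Negative strides fall
    // back to K for StrideA and N for StrideB/StrideC.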
    if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        ck::profiler::profile_gemm_bias_2d_impl<float,
                                                float,
                                                float,
                                                float,
                                                float,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? K : StrideA,
            (StrideB < 0) ? N : StrideB,
            (StrideC < 0) ? N : StrideC,
            alpha,
            beta);
    }
    else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::MK_NK_MN)
    {
        ck::profiler::profile_gemm_bias_2d_impl<float,
                                                float,
                                                float,
                                                float,
                                                float,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? K : StrideA,
            (StrideB < 0) ? N : StrideB,
            (StrideC < 0) ? N : StrideC,
            alpha,
            beta);
    }
    else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::KM_KN_MN)
    {
        ck::profiler::profile_gemm_bias_2d_impl<float,
                                                float,
                                                float,
                                                float,
                                                float,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? K : StrideA,
            (StrideB < 0) ? N : StrideB,
            (StrideC < 0) ? N : StrideC,
            alpha,
            beta);
    }
    else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::KM_NK_MN)
    {
        ck::profiler::profile_gemm_bias_2d_impl<float,
                                                float,
                                                float,
                                                float,
                                                float,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? K : StrideA,
            (StrideB < 0) ? N : StrideB,
            (StrideC < 0) ? N : StrideC,
            alpha,
            beta);
    }
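    // fp16 cases: A, B, C, and the bias use ck::half_t, while the fifth template argument remains
    // float; by analogy with the fp32 cases above, this is presumably the accumulation/reference
    // type used inside profile_gemm_bias_2d_impl.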
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        ck::profiler::profile_gemm_bias_2d_impl<ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                float,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? K : StrideA,
            (StrideB < 0) ? N : StrideB,
            (StrideC < 0) ? N : StrideC,
            alpha,
            beta);
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_NK_MN)
    {
        ck::profiler::profile_gemm_bias_2d_impl<ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                float,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? K : StrideA,
            (StrideB < 0) ? N : StrideB,
            (StrideC < 0) ? N : StrideC,
            alpha,
            beta);
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_KN_MN)
    {
        ck::profiler::profile_gemm_bias_2d_impl<ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                float,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::RowMajor,
                                                ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? K : StrideA,
            (StrideB < 0) ? N : StrideB,
            (StrideC < 0) ? N : StrideC,
            alpha,
            beta);
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_NK_MN)
    {
        ck::profiler::profile_gemm_bias_2d_impl<ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                ck::half_t,
                                                float,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::ColumnMajor,
                                                ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? K : StrideA,
            (StrideB < 0) ? N : StrideB,
            (StrideC < 0) ? N : StrideC,
            alpha,
            beta);
    }
    else
    {
        throw std::runtime_error("wrong! this data_type & layout combination is not implemented");
    }

    return 1;
}