// SPDX-License-Identifier: MIT
// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "profiler/profile_gemm_universal_impl.hpp"
#include "profiler_operation_registry.hpp"

enum struct GemmMatrixLayout
{
    MK_KN_MN, // 0
    MK_NK_MN, // 1
    KM_KN_MN, // 2
    KM_NK_MN, // 3
};

enum struct GemmDataType
{
    F32_F32_F32,    // 0
    F16_F16_F16,    // 1
    BF16_BF16_BF16, // 2
    INT8_INT8_INT8, // 3
    F8_F16_F16,     // 4
    F16_F8_F16,     // 5
    F16_F16_F16_F8, // 6
    F8_F8_BF16,     // 7
};

#define OP_NAME "gemm_universal"
#define OP_DESC "Universal GEMM"

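// Profiler entry point: parses the command line documented in the usage
// message below and forwards the parameters to the templated implementation.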
int profile_gemm_universal(int argc, char* argv[])
{
    if(argc != 15 && argc != 18)
    {
        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
        printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8; 4: f8@f16; 5: f16@f8; 6: "
               "f16->f8; 7: f8->bf16, "
               "comp f8)\n");
        printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
        printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
        printf("                     2: A[k, m] * B[k, n] = C[m, n];\n");
        printf("                     3: A[k, m] * B[n, k] = C[m, n])\n");
        printf("arg4: verification (0: no; 1: yes)\n");
        printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg6: print tensor value (0: no; 1: yes)\n");
        printf("arg7: time kernel (0=no, 1=yes)\n");
        printf("arg8 to 13: M, N, K, StrideA, StrideB, StrideC\n");
        printf("arg14: split k into  mulitiple batch\n");
        printf("optional:\n");
        printf("arg15: number of warm-up cycles (default 1)\n");
        printf("arg16: number of iterations (default 10)\n");
        printf("arg17: memory for rotating buffer (default 0, size in MB)\n");
        exit(1);
    }

    int M;
    int N;
    int StrideA;
    int StrideB;
    // Work around unsupported matrix shapes: if N is not a multiple of 8 but M is,
    // swap M and N (and the corresponding strides)
    if(std::stoi(argv[9]) % 8 != 0 && std::stoi(argv[8]) % 8 == 0)
    {
        M       = std::stoi(argv[9]);
        StrideA = std::stoi(argv[12]);
        N       = std::stoi(argv[8]);
        StrideB = std::stoi(argv[11]);
    }
    else
    {
        M       = std::stoi(argv[8]);
        StrideA = std::stoi(argv[11]);
        N       = std::stoi(argv[9]);
        StrideB = std::stoi(argv[12]);
    }
    const auto data_type       = static_cast<GemmDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<GemmMatrixLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);

    const int K = std::stoi(argv[10]);

    const int StrideC = std::stoi(argv[13]);
    const int KBatch  = std::stoi(argv[14]);

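    // Benchmark controls; the defaults below are used unless the optional
    // warm-up, iteration, and rotating-buffer arguments are supplied.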
    int n_warmup      = 1;
    int n_iter        = 10;
    uint64_t rotating = 0;
    if(argc == 18)
    {
        n_warmup = std::stoi(argv[15]);
        n_iter   = std::stoi(argv[16]);
        rotating = std::stoull(argv[17]) * 1024 * 1024;
    }

    using F32  = float;
    using F16  = ck::half_t;
    using BF16 = ck::bhalf_t;
    using F8   = ck::f8_t;

    using Row = ck::tensor_layout::gemm::RowMajor;
    using Col = ck::tensor_layout::gemm::ColumnMajor;

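    // Dispatch helper: the tag arguments carry the data types and layouts as
    // compile-time types for profile_gemm_universal_impl.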
    auto profile = [&](auto a_type,
                       auto b_type,
                       auto comp_type,
                       auto acc_type,
                       auto c_type,
                       auto a_layout,
                       auto b_layout,
                       auto c_layout) {
        using ADataType       = decltype(a_type);
        using BDataType       = decltype(b_type);
        using ComputeDataType = decltype(comp_type);
        using AccDataType     = decltype(acc_type);
        using CDataType       = decltype(c_type);

        using ALayout = decltype(a_layout);
        using BLayout = decltype(b_layout);
        using CLayout = decltype(c_layout);

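        // A negative stride argument selects the packed default: the leading
        // dimension equals the contiguous extent for the given layout.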
        const int DefaultStrideA = ck::is_same_v<ALayout, Row> ? K : M;
        const int DefaultStrideB = ck::is_same_v<BLayout, Row> ? N : K;
        const int DefaultStrideC = ck::is_same_v<CLayout, Row> ? N : M;

        bool pass = ck::profiler::profile_gemm_universal_impl<ADataType,
                                                              BDataType,
                                                              ComputeDataType,
                                                              AccDataType,
                                                              CDataType,
                                                              ALayout,
                                                              BLayout,
                                                              CLayout>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? DefaultStrideA : StrideA,
            (StrideB < 0) ? DefaultStrideB : StrideB,
            (StrideC < 0) ? DefaultStrideC : StrideC,
            KBatch,
            n_warmup,
            n_iter,
            rotating);

        return pass ? 0 : 1;
    };

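    // Map the requested data type / layout combination onto a concrete
    // instantiation; unsupported combinations fall through to the error below.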
    if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        return profile(F16{}, F16{}, F16{}, F32{}, F16{}, Row{}, Row{}, Row{});
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_NK_MN)
    {
        return profile(F16{}, F16{}, F16{}, F32{}, F16{}, Row{}, Col{}, Row{});
    }
    else if(data_type == GemmDataType::F16_F8_F16 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        return profile(F16{}, F8{}, F16{}, F32{}, F16{}, Row{}, Row{}, Row{});
    }
    else if(data_type == GemmDataType::F16_F8_F16 && layout == GemmMatrixLayout::MK_NK_MN)
    {
        return profile(F16{}, F8{}, F16{}, F32{}, F16{}, Row{}, Col{}, Row{});
    }
    else if(data_type == GemmDataType::F8_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        return profile(F8{}, F16{}, F16{}, F32{}, F16{}, Row{}, Row{}, Row{});
    }
    else if(data_type == GemmDataType::F8_F16_F16 && layout == GemmMatrixLayout::MK_NK_MN)
    {
        return profile(F8{}, F16{}, F16{}, F32{}, F16{}, Row{}, Col{}, Row{});
    }
    else if(data_type == GemmDataType::BF16_BF16_BF16 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        return profile(BF16{}, BF16{}, BF16{}, F32{}, BF16{}, Row{}, Row{}, Row{});
    }
    else if(data_type == GemmDataType::BF16_BF16_BF16 && layout == GemmMatrixLayout::MK_NK_MN)
    {
        return profile(BF16{}, BF16{}, BF16{}, F32{}, BF16{}, Row{}, Col{}, Row{});
    }
    else if(data_type == GemmDataType::BF16_BF16_BF16 && layout == GemmMatrixLayout::KM_NK_MN)
    {
        return profile(BF16{}, BF16{}, BF16{}, F32{}, BF16{}, Col{}, Col{}, Row{});
    }
    else if(data_type == GemmDataType::BF16_BF16_BF16 && layout == GemmMatrixLayout::KM_KN_MN)
    {
        return profile(BF16{}, BF16{}, BF16{}, F32{}, BF16{}, Col{}, Row{}, Row{});
    }
    else if(data_type == GemmDataType::F8_F8_BF16 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        return profile(F8{}, F8{}, F8{}, F32{}, BF16{}, Row{}, Row{}, Row{});
    }
    else if(data_type == GemmDataType::F8_F8_BF16 && layout == GemmMatrixLayout::MK_NK_MN)
    {
        return profile(F8{}, F8{}, F8{}, F32{}, BF16{}, Row{}, Col{}, Row{});
    }
    else
    {
        std::cout << "this data_type & layout is not implemented" << std::endl;

        return 1;
    }
}

REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_universal);