// profile_batched_gemm_reduce.cpp
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <half.hpp>

#include "profile_batched_gemm_reduce_impl.hpp"

int profile_batched_gemm_reduce(int argc, char* argv[])
{
    enum struct GemmMatrixLayout
    {
        MK_KN_MN, // 0
        MK_NK_MN, // 1
        KM_KN_MN, // 2
        KM_NK_MN, // 3
    };
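
    // Layout naming (as used by the dispatch below): each token is a matrix's
    // in-memory order, e.g. MK = row-major A[m, k], KM = column-major A[k, m];
    // the final MN is the row-major output C[m, n].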

    enum struct GemmReduceDataType
    {
        F32_F32_F32_F32_F32, // 0
        F16_F16_F16_F32_F32, // 1
    };
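
    // Data-type naming (assumed from the template arguments below): A/B/C
    // element types followed by the reduction data types, e.g.
    // F16_F16_F16_F32_F32 = fp16 GEMM with fp32 reductions.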

    if(!(argc == 15 || argc == 16))
    {
        printf("arg1: tensor operation (batched_gemm: BatchedGEMM+Reduce)\n");
        printf("arg2: data type (0: fp32; 1: fp16)\n");
        printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
        printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
        printf("                     2: A[k, m] * B[k, n] = C[m, n];\n");
        printf("                     3: A[k, m] * B[n, k] = C[m, n])\n");
        printf("arg4: verification (0: no; 1: yes)\n");
        printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg6: print tensor value (0: no; 1: yes)\n");
        printf("arg7: time kernel (0=n0, 1=yes)\n");
        printf("arg8 to 14: M, N, K, StrideA, StrideB, StrideC, BatchCount\n");
        printf("arg15: split k into  mulitiple batch\n");
        exit(1);
    }
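
    // Illustrative invocation (assuming this is driven by the ckProfiler CLI;
    // the binary name and op string may differ):
    //   ckProfiler batched_gemm_reduce 1 1 1 1 0 1 1024 1024 1024 -1 -1 -1 16
    // i.e. fp16, MK_NK_MN layout, verified, integer init, packed strides, 16 batches.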

    const auto data_type       = static_cast<GemmReduceDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<GemmMatrixLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);

    const int M = std::stoi(argv[8]);
    const int N = std::stoi(argv[9]);
    const int K = std::stoi(argv[10]);

    const int StrideA = std::stoi(argv[11]);
    const int StrideB = std::stoi(argv[12]);
    const int StrideC = std::stoi(argv[13]);

    const int BatchCount = std::stoi(argv[14]);
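
    // A negative stride selects the packed default used in each branch below:
    // the leading dimension equals the contiguous row length of that layout
    // (e.g. K for row-major A[m, k], N for row-major C[m, n]).
    // Note: the optional arg15 (split-K batch count) passes the argc check
    // above but is never read in this function.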

    if(data_type == GemmReduceDataType::F16_F16_F16_F32_F32 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        ck::profiler::profile_batched_gemm_reduce_impl<ck::half_t,
                                                       ck::half_t,
                                                       ck::half_t,
                                                       float,
                                                       ck::tensor_layout::gemm::RowMajor,
                                                       ck::tensor_layout::gemm::RowMajor,
                                                       ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? K : StrideA,
            (StrideB < 0) ? N : StrideB,
            (StrideC < 0) ? N : StrideC,
            BatchCount);
    }
    else if(data_type == GemmReduceDataType::F16_F16_F16_F32_F32 &&
            layout == GemmMatrixLayout::MK_NK_MN)
    {
        ck::profiler::profile_batched_gemm_reduce_impl<ck::half_t,
                                                       ck::half_t,
                                                       ck::half_t,
                                                       float,
                                                       ck::tensor_layout::gemm::RowMajor,
                                                       ck::tensor_layout::gemm::ColumnMajor,
                                                       ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? K : StrideA,
            (StrideB < 0) ? K : StrideB,
            (StrideC < 0) ? N : StrideC,
            BatchCount);
    }
    else if(data_type == GemmReduceDataType::F16_F16_F16_F32_F32 &&
            layout == GemmMatrixLayout::KM_KN_MN)
    {
        ck::profiler::profile_batched_gemm_reduce_impl<ck::half_t,
                                                       ck::half_t,
                                                       ck::half_t,
                                                       float,
                                                       ck::tensor_layout::gemm::ColumnMajor,
                                                       ck::tensor_layout::gemm::RowMajor,
                                                       ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? M : StrideA,
            (StrideB < 0) ? N : StrideB,
            (StrideC < 0) ? N : StrideC,
            BatchCount);
    }
    else if(data_type == GemmReduceDataType::F16_F16_F16_F32_F32 &&
            layout == GemmMatrixLayout::KM_NK_MN)
    {
        ck::profiler::profile_batched_gemm_reduce_impl<ck::half_t,
                                                       ck::half_t,
                                                       ck::half_t,
                                                       float,
                                                       ck::tensor_layout::gemm::ColumnMajor,
                                                       ck::tensor_layout::gemm::ColumnMajor,
                                                       ck::tensor_layout::gemm::RowMajor>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? M : StrideA,
            (StrideB < 0) ? K : StrideB,
            (StrideC < 0) ? N : StrideC,
            BatchCount);
    }
    else
    {
        throw std::runtime_error("wrong! this data_type & layout combination is not implemented");
    }

    return 0;
}