// profile_gemm_impl.hpp
#pragma once
#include "device_gemm_instance.hpp"

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>

// Tensor, HostTensorDescriptor, DeviceMem, the GeneratorTensor_* helpers,
// host_gemm_mk_kn_mn, check_error and LogRangeAsType are assumed to be
// provided by the project's host-side utility headers.

namespace ck {
namespace tensor_operation {
namespace device {
namespace device_gemm_instance {
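
// Forward declarations of the explicit specializations that populate the
// profiler with device GEMM instances: fp32 and fp16, each covering all four
// (A, B) layout combinations with a row-major C. The definitions are expected
// to live in separately compiled instance libraries.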

template <>
void add_device_gemm_instance<float,
                              float,
                              float,
                              ck::tensor_layout::gemm::RowMajor,
                              ck::tensor_layout::gemm::RowMajor,
                              ck::tensor_layout::gemm::RowMajor>(std::vector<DeviceGemmNoOpPtr>&);

template <>
void add_device_gemm_instance<float,
                              float,
                              float,
                              ck::tensor_layout::gemm::RowMajor,
                              ck::tensor_layout::gemm::ColumnMajor,
                              ck::tensor_layout::gemm::RowMajor>(std::vector<DeviceGemmNoOpPtr>&);

template <>
void add_device_gemm_instance<float,
                              float,
                              float,
                              ck::tensor_layout::gemm::ColumnMajor,
                              ck::tensor_layout::gemm::RowMajor,
                              ck::tensor_layout::gemm::RowMajor>(std::vector<DeviceGemmNoOpPtr>&);

template <>
void add_device_gemm_instance<float,
                              float,
                              float,
                              ck::tensor_layout::gemm::ColumnMajor,
                              ck::tensor_layout::gemm::ColumnMajor,
                              ck::tensor_layout::gemm::RowMajor>(std::vector<DeviceGemmNoOpPtr>&);

template <>
void add_device_gemm_instance<ck::half_t,
                              ck::half_t,
                              ck::half_t,
                              ck::tensor_layout::gemm::RowMajor,
                              ck::tensor_layout::gemm::RowMajor,
                              ck::tensor_layout::gemm::RowMajor>(std::vector<DeviceGemmNoOpPtr>&);

template <>
void add_device_gemm_instance<ck::half_t,
                              ck::half_t,
                              ck::half_t,
                              ck::tensor_layout::gemm::RowMajor,
                              ck::tensor_layout::gemm::ColumnMajor,
                              ck::tensor_layout::gemm::RowMajor>(std::vector<DeviceGemmNoOpPtr>&);

template <>
void add_device_gemm_instance<ck::half_t,
                              ck::half_t,
                              ck::half_t,
                              ck::tensor_layout::gemm::ColumnMajor,
                              ck::tensor_layout::gemm::RowMajor,
                              ck::tensor_layout::gemm::RowMajor>(std::vector<DeviceGemmNoOpPtr>&);

template <>
void add_device_gemm_instance<ck::half_t,
                              ck::half_t,
                              ck::half_t,
                              ck::tensor_layout::gemm::ColumnMajor,
                              ck::tensor_layout::gemm::ColumnMajor,
                              ck::tensor_layout::gemm::RowMajor>(std::vector<DeviceGemmNoOpPtr>&);

} // namespace device_gemm_instance
} // namespace device
} // namespace tensor_operation
} // namespace ck

namespace ck {
namespace profiler {

template <typename ADataType,
          typename BDataType,
          typename CDataType,
          typename ALayout,
          typename BLayout,
          typename CLayout>
void profile_gemm_impl(int do_verification,
                       int init_method,
                       bool do_log,
                       int nrepeat,
                       int M,
                       int N,
                       int K,
                       int StrideA,
                       int StrideB,
                       int StrideC)
{
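    // Construct a 2D host tensor descriptor for the given layout tag. A
    // row-major (row x col) tensor gets strides {stride, 1}; a column-major
    // one gets {1, stride}. For example, a row-major 3 x 4 matrix with
    // stride 4 yields lengths {3, 4} and strides {4, 1}.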
    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({stride, 1}));
            }
            else
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({1, stride}));
            }
        };

    Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
    Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
    Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));

    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;

    std::size_t num_thread = std::thread::hardware_concurrency();

    switch(init_method)
    {
    case 0: break;
    case 1:
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5}, num_thread);
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5}, num_thread);
        break;
    default:
        a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0}, num_thread);
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
    }
    // zero-initialize the device result buffer so stale data cannot pass verification
    c_m_n_device_result.GenerateTensorValue(GeneratorTensor_0<CDataType>{}, num_thread);

    if(do_verification)
    {
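        // Compute the reference result on the host; PassThrough is the
        // identity elementwise operator applied to A, B and C.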
        host_gemm_mk_kn_mn(a_m_k,
                           b_k_n,
                           c_m_n_host_result,
                           ck::tensor_operation::element_wise::PassThrough{},
                           ck::tensor_operation::element_wise::PassThrough{},
                           ck::tensor_operation::element_wise::PassThrough{});
    }

    DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpace());
    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpace());
    DeviceMem c_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpace());

    a_device_buf.ToDevice(a_m_k.mData.data());
    b_device_buf.ToDevice(b_k_n.mData.data());
    c_device_buf.ToDevice(c_m_n_device_result.mData.data());

    // add device GEMM instances
    std::vector<ck::tensor_operation::device::device_gemm_instance::DeviceGemmNoOpPtr> gemm_ptrs;

    ck::tensor_operation::device::device_gemm_instance::
        add_device_gemm_instance<ADataType, BDataType, CDataType, ALayout, BLayout, CLayout>(
            gemm_ptrs);

    if(gemm_ptrs.empty())
    {
        throw std::runtime_error("wrong! no device GEMM instance found");
    }

    std::string best_gemm_name;
    float best_ave_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    // profile device GEMM instances
    for(auto& gemm_ptr : gemm_ptrs)
    {
        auto argument_ptr =
            gemm_ptr->MakeArgumentPointer(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
                                          static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
                                          static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
                                          M,
                                          N,
                                          K,
                                          StrideA,
                                          StrideB,
                                          StrideC,
                                          ck::tensor_operation::element_wise::PassThrough{},
                                          ck::tensor_operation::element_wise::PassThrough{},
                                          ck::tensor_operation::element_wise::PassThrough{});

        auto invoker_ptr = gemm_ptr->MakeInvokerPointer();

        if(gemm_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            std::string gemm_name = gemm_ptr->GetTypeString();

            float ave_time = invoker_ptr->Run(argument_ptr.get(), nrepeat);

            // A GEMM performs 2 * M * N * K flops: one multiply and one add
            // per multiply-accumulate.
            std::size_t flop = std::size_t(2) * M * N * K;

            // Bytes moved: A is M x K, B is K x N, C is M x N elements.
            std::size_t num_btype =
                sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(CDataType) * M * N;

            // ave_time is in milliseconds: flop / 1e9 / ms == TFlop/s,
            // bytes / 1e6 / ms == GB/s.
            float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

            float gb_per_sec = num_btype / 1.E6 / ave_time;

            std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
                      << " GB/s, " << gemm_name << std::endl;

            if(tflops > best_tflops)
            {
                best_gemm_name  = gemm_name;
                best_tflops     = tflops;
                best_ave_time   = ave_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
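                // Copy the device result back and compare it against the host
                // reference computed above.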
                c_device_buf.FromDevice(c_m_n_device_result.mData.data());

                check_error(c_m_n_host_result, c_m_n_device_result);

                if(do_log)
                {
                    LogRangeAsType<float>(std::cout << "a       : ", a_m_k.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "b       : ", b_k_n.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "c_host  : ", c_m_n_host_result.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(std::cout << "c_device: ", c_m_n_device_result.mData, ",")
                        << std::endl;
                }
            }
        }
        else
        {
            std::cout << "this device GEMM instance does not support this GEMM problem"
                      << std::endl;
        }
    }

    std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
              << best_gb_per_sec << " GB/s, " << best_gemm_name << std::endl;
}

} // namespace profiler
} // namespace ck
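
// ---------------------------------------------------------------------------
// Example usage (a minimal sketch; the actual profiler driver in this repo
// may parse these arguments from the command line). The call below profiles
// an fp32 GEMM with row-major A, column-major B and row-major C on a packed
// 1024 x 1024 x 1024 problem:
//
//   ck::profiler::profile_gemm_impl<float,
//                                   float,
//                                   float,
//                                   ck::tensor_layout::gemm::RowMajor,
//                                   ck::tensor_layout::gemm::ColumnMajor,
//                                   ck::tensor_layout::gemm::RowMajor>(
//       1,     // do_verification: check against the host reference GEMM
//       1,     // init_method: integer values in [-5, 5]
//       false, // do_log: do not dump tensor contents
//       10,    // nrepeat: timed runs per instance
//       1024, 1024, 1024,  // M, N, K
//       1024, 1024, 1024); // StrideA, StrideB, StrideC (packed leading dims)
// ---------------------------------------------------------------------------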