#include <cstdlib>
#include <iostream>
#include <memory>      // std::make_unique (used by get_conv_instance)
#include <numeric>
#include <stdexcept>   // std::runtime_error
#include <string>      // std::stoi
#include <type_traits>
#include <vector>

#include "check_err.hpp"
#include "config.hpp"
#include "conv_util.hpp"
#include "device.hpp"
#include "device_tensor.hpp"
#include "device_convnd_fwd_xdl_nhwc_kyxc_nhwk.hpp"
#include "element_wise_operation.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "reference_conv_fwd.hpp"
#include "tensor_layout.hpp"

namespace {

// Problem data types: fp32 input, weights and output, fp32 accumulation.
using InDataType  = float;
using WeiDataType = float;
using OutDataType = float;
using AccDataType = float;

// Shorthand for compile-time integer sequences used in the tuning parameters below.
template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

// No fused elementwise operations on input/weights/output - plain convolution.
using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::PassThrough;

static constexpr auto ConvFwdDefault =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;

// Common owning-pointer type for all spatial-dimension instantiations, so the
// factory below can return 1D/2D/3D instances through one interface.
using DeviceConvFwdBasePtr =
    ck::tensor_operation::device::DeviceConvFwdPtr<InElementOp, WeiElementOp, OutElementOp>;

// XDL (matrix-core) forward-convolution device instance for N spatial dimensions,
// layout: input N[spatial]C, weights K[spatial]C, output N[spatial]K.
// The numeric arguments are block/wave tiling parameters for the GEMM formulation.
template <ck::index_t NumDimSpatial>
using DeviceConvNDFwdInstance = ck::tensor_operation::device::
    DeviceConvNDFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K<
        // clang-format off
        InDataType,         //
        WeiDataType,        //
        OutDataType,        //
        AccDataType,        //
        InElementOp,        // Input Elementwise Operation
        WeiElementOp,       // Weights Elementwise Operation
        OutElementOp,       // Output Elementwise Operation
        ConvFwdDefault,     // ConvForwardSpecialization
        NumDimSpatial,      // NumDimSpatial
        256,                // BlockSize
        256,                // MPerBlock
        128,                // NPerBlock
        4,                  // K0PerBlock
        4,                  // K1
        32,                 // MPerXDL
        32,                 // NPerXDL
        4,                  // MXdlPerWave
        2,                  // NXdlPerWave
        S<4, 64, 1>,        // ABlockTransferThreadClusterLengths_K0_M_K1
        S<1, 0, 2>,         // ABlockTransferThreadClusterArrangeOrder
        S<1, 0, 2>,         // ABlockTransferSrcAccessOrder
        2,                  // ABlockTransferSrcVectorDim
        4,                  // ABlockTransferSrcScalarPerVector
        4,                  // ABlockTransferDstScalarPerVector_K1
        true,               // ABlockLdsAddExtraM
        S<4, 64, 1>,        // BBlockTransferThreadClusterLengths_K0_N_K1
        S<1, 0, 2>,         // BBlockTransferThreadClusterArrangeOrder
        S<1, 0, 2>,         // BBlockTransferSrcAccessOrder
        2,                  // BBlockTransferSrcVectorDim
        4,                  // BBlockTransferSrcScalarPerVector
        4,                  // BBlockTransferDstScalarPerVector_K1
        true,               // BBlockTransferAddExtraN
        7,                  // CThreadTransferSrcDstVectorDim
        1>;                 // CThreadTransferDstScalarPerVector
// clang-format on

// Host-side (CPU) reference convolution used for result verification.
template <ck::index_t NumDimSpatial>
using ReferenceConvNDFwdInstance = ck::tensor_operation::host::ReferenceConvFwd<InDataType,
                                                                                WeiDataType,
                                                                                OutDataType,
                                                                                InElementOp,
                                                                                WeiElementOp,
                                                                                OutElementOp,
                                                                                NumDimSpatial>;

87
DeviceConvFwdBasePtr get_conv_instance(int num_dim_spatial)
88
89
90
{
    switch(num_dim_spatial)
    {
91
92
93
    case 3: {
        return std::make_unique<DeviceConvNDFwdInstance<3>>();
    }
94
95
96
97
98
99
100
101
102
103
104
105
    case 2: {
        return std::make_unique<DeviceConvNDFwdInstance<2>>();
    }
    case 1: {
        return std::make_unique<DeviceConvNDFwdInstance<1>>();
    }
    default: {
        throw std::runtime_error("Unsupported number of spatial dimensions provided!");
    }
    }
}

// Print command-line usage for this example binary.
// Spatial-dimension-dependent arguments are listed generically; their count
// depends on arg4 (N spatial dimensions).
void print_use_msg()
{
    std::cout << "arg1: verification (0=no, 1=yes)\n"
              << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"
              << "arg3: run kernel # of times (>1)\n"
              << "arg4: N spatial dimensions (default 2)\n"
              << "Following arguments (depending on number of spatial dims):\n"
              << " N, K, C, \n"
              << " <filter spatial dimensions>, (ie Y, X for 2D)\n"
              << " <input image spatial dimensions>, (ie Hi, Wi for 2D)\n"
              << " <strides>, (ie Sy, Sx for 2D)\n"
              << " <dilations>, (ie Dy, Dx for 2D)\n"
              << " <left padding>, (ie LeftPy, LeftPx for 2D)\n"
              << " <right padding>, (ie RightPy, RightPx for 2D)\n"
              << std::endl;
}

123
ck::utils::conv::ConvParams parse_conv_params(int num_dim_spatial, int argc, char* argv[])
124
125
126
127
128
129
{
    // (N, K, C) + num_dim_spatial * 6 (filter, input, strides, dilations, pad left, pad right)
    int conv_args     = 3 + num_dim_spatial * 6;
    int cmdline_nargs = conv_args + 5;
    if(cmdline_nargs != argc)
    {
130
        print_use_msg();
131
132
133
        exit(0);
    }

134
    ck::utils::conv::ConvParams params;
135
136
    int arg_idx = 5;

Adam Osewski's avatar
Adam Osewski committed
137
138
139
140
    params.num_dim_spatial_ = num_dim_spatial;
    params.N_               = std::stoi(argv[arg_idx++]);
    params.K_               = std::stoi(argv[arg_idx++]);
    params.C_               = std::stoi(argv[arg_idx++]);
141

Adam Osewski's avatar
Adam Osewski committed
142
    params.filter_spatial_lengths_.resize(num_dim_spatial);
143
144
    for(int i = 0; i < num_dim_spatial; ++i)
    {
Adam Osewski's avatar
Adam Osewski committed
145
        params.filter_spatial_lengths_[i] = std::stoi(argv[arg_idx++]);
146
    }
Adam Osewski's avatar
Adam Osewski committed
147
    params.input_spatial_lengths_.resize(num_dim_spatial);
148
149
    for(int i = 0; i < num_dim_spatial; ++i)
    {
Adam Osewski's avatar
Adam Osewski committed
150
        params.input_spatial_lengths_[i] = std::stoi(argv[arg_idx++]);
151
    }
Adam Osewski's avatar
Adam Osewski committed
152
    params.conv_filter_strides_.resize(num_dim_spatial);
153
154
    for(int i = 0; i < num_dim_spatial; ++i)
    {
Adam Osewski's avatar
Adam Osewski committed
155
        params.conv_filter_strides_[i] = std::stoi(argv[arg_idx++]);
156
    }
Adam Osewski's avatar
Adam Osewski committed
157
    params.conv_filter_dilations_.resize(num_dim_spatial);
158
159
    for(int i = 0; i < num_dim_spatial; ++i)
    {
Adam Osewski's avatar
Adam Osewski committed
160
        params.conv_filter_dilations_[i] = std::stoi(argv[arg_idx++]);
161
    }
Adam Osewski's avatar
Adam Osewski committed
162
    params.input_left_pads_.resize(num_dim_spatial);
163
164
    for(int i = 0; i < num_dim_spatial; ++i)
    {
Adam Osewski's avatar
Adam Osewski committed
165
        params.input_left_pads_[i] = std::stoi(argv[arg_idx++]);
166
    }
Adam Osewski's avatar
Adam Osewski committed
167
    params.input_right_pads_.resize(num_dim_spatial);
168
169
    for(int i = 0; i < num_dim_spatial; ++i)
    {
Adam Osewski's avatar
Adam Osewski committed
170
        params.input_right_pads_[i] = std::stoi(argv[arg_idx++]);
171
172
173
174
175
    }

    return params;
}

176
} // anonymous namespace
177
178
179

int main(int argc, char* argv[])
{
180
181
    using namespace ck::utils::conv;

182
183
184
185
186
    bool do_verification = 0;
    int init_method      = 0;
    int nrepeat          = 5;
    int num_dim_spatial  = 2;

187
    ck::utils::conv::ConvParams params;
188
189
190
191
192
193
194
195
196
197
198

    if(argc >= 5)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        nrepeat         = std::stoi(argv[3]);
        num_dim_spatial = std::stoi(argv[4]);
    }

    if(argc >= 6)
    {
199
        params = parse_conv_params(num_dim_spatial, argc, argv);
200
201
    }

Adam Osewski's avatar
Adam Osewski committed
202
203
    std::vector<std::size_t> input_dims{static_cast<std::size_t>(params.N_),
                                        static_cast<std::size_t>(params.C_)};
204
    input_dims.insert(std::end(input_dims),
Adam Osewski's avatar
Adam Osewski committed
205
206
                      std::begin(params.input_spatial_lengths_),
                      std::end(params.input_spatial_lengths_));
207

Adam Osewski's avatar
Adam Osewski committed
208
209
    std::vector<std::size_t> filter_dims{static_cast<std::size_t>(params.K_),
                                         static_cast<std::size_t>(params.C_)};
210
    filter_dims.insert(std::end(filter_dims),
Adam Osewski's avatar
Adam Osewski committed
211
212
                       std::begin(params.filter_spatial_lengths_),
                       std::end(params.filter_spatial_lengths_));
213
214

    const std::vector<ck::index_t>& output_spatial_lengths = params.GetOutputSpatialLengths();
Adam Osewski's avatar
Adam Osewski committed
215
216
    std::vector<std::size_t> output_dims{static_cast<std::size_t>(params.N_),
                                         static_cast<std::size_t>(params.K_)};
217
218
219
220
    output_dims.insert(std::end(output_dims),
                       std::begin(output_spatial_lengths),
                       std::end(output_spatial_lengths));

221
222
223
224
225
226
    Tensor<InDataType> input(get_input_host_tensor_descriptor(input_dims, num_dim_spatial));
    Tensor<WeiDataType> weights(get_filters_host_tensor_descriptor(filter_dims, num_dim_spatial));
    Tensor<OutDataType> host_output(
        get_output_host_tensor_descriptor(output_dims, num_dim_spatial));
    Tensor<OutDataType> device_output(
        get_output_host_tensor_descriptor(output_dims, num_dim_spatial));
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251

    std::cout << "input: " << input.mDesc << std::endl;
    std::cout << "weights: " << weights.mDesc << std::endl;
    std::cout << "output: " << host_output.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        input.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
        weights.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
        break;
    default:
        input.GenerateTensorValue(GeneratorTensor_3<InDataType>{0.0, 1.0});
        weights.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpace());
    DeviceMem wei_device_buf(sizeof(WeiDataType) * weights.mDesc.GetElementSpace());
    DeviceMem out_device_buf(sizeof(OutDataType) * device_output.mDesc.GetElementSpace());

    in_device_buf.ToDevice(input.mData.data());
    wei_device_buf.ToDevice(weights.mData.data());

    // do GEMM
252
    auto conv    = get_conv_instance(num_dim_spatial);
253
254
255
256
257
    auto invoker = conv->MakeInvokerPointer();
    auto argument =
        conv->MakeArgumentPointer(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
                                  static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
                                  static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
Adam Osewski's avatar
Adam Osewski committed
258
259
260
261
262
                                  params.N_,
                                  params.K_,
                                  params.C_,
                                  params.input_spatial_lengths_,
                                  params.filter_spatial_lengths_,
263
                                  output_spatial_lengths,
Adam Osewski's avatar
Adam Osewski committed
264
265
266
267
                                  params.conv_filter_strides_,
                                  params.conv_filter_dilations_,
                                  params.input_left_pads_,
                                  params.input_right_pads_,
268
269
270
271
272
273
274
275
276
277
278
279
280
                                  InElementOp{},
                                  WeiElementOp{},
                                  OutElementOp{});

    if(!conv->IsSupportedArgument(argument.get()))
    {
        throw std::runtime_error(
            "wrong! device_conv with the specified compilation parameters does "
            "not support this Conv problem");
    }

    float ave_time = invoker->Run(argument.get(), nrepeat);

281
    std::size_t flop = get_flops(
Adam Osewski's avatar
Adam Osewski committed
282
        params.N_, params.C_, params.K_, params.filter_spatial_lengths_, output_spatial_lengths);
283
    std::size_t num_btype =
Adam Osewski's avatar
Adam Osewski committed
284
285
286
287
288
        get_btype<InDataType, WeiDataType, OutDataType>(params.N_,
                                                        params.C_,
                                                        params.K_,
                                                        params.input_spatial_lengths_,
                                                        params.filter_spatial_lengths_,
289
                                                        output_spatial_lengths);
290
291
292
293
294
295
296
297
298
299
300
301
302
303

    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
    float gb_per_sec = num_btype / 1.E6 / ave_time;
    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s"
              << std::endl;

    if(do_verification)
    {
        auto verify_f = [&input, &weights, &host_output, &params, &out_device_buf, &device_output](
                            const auto& ref_conv) {
            auto ref_invoker  = ref_conv.MakeInvoker();
            auto ref_argument = ref_conv.MakeArgument(input,
                                                      weights,
                                                      host_output,
Adam Osewski's avatar
Adam Osewski committed
304
305
306
307
                                                      params.conv_filter_strides_,
                                                      params.conv_filter_dilations_,
                                                      params.input_left_pads_,
                                                      params.input_right_pads_,
308
309
310
311
312
313
                                                      InElementOp{},
                                                      WeiElementOp{},
                                                      OutElementOp{});

            ref_invoker.Run(ref_argument);
            out_device_buf.FromDevice(device_output.mData.data());
314
315
            ck::utils::check_err(
                host_output.mData, device_output.mData, "Error: incorrect results!", 1e-5f, 1e-4f);
316
317
318
319
        };

        switch(num_dim_spatial)
        {
320
321
322
323
324
        case 3: {
            auto ref_conv = ReferenceConvNDFwdInstance<3>();
            verify_f(ref_conv);
            break;
        }
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
        case 2: {
            auto ref_conv = ReferenceConvNDFwdInstance<2>();
            verify_f(ref_conv);
            break;
        }
        case 1: {
            auto ref_conv = ReferenceConvNDFwdInstance<1>();
            verify_f(ref_conv);
            break;
        }
        default: {
            throw std::runtime_error("Unsupported number of spatial dimensions provided!");
        }
        }
    }
}