lowering.cpp 17.3 KB
Newer Older
Shucai Xiao's avatar
Shucai Xiao committed
1
#include <iterator>
Paul's avatar
Paul committed
2
3
4
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
5
6
7
8
9
10
11
12
#include <migraphx/make_op.hpp>

#include <migraphx/op/abs.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/deconvolution.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/elu.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
13
#include <migraphx/op/if_op.hpp>
14
15
16
17
18
19
20
21
22
#include <migraphx/op/leaky_relu.hpp>
#include <migraphx/op/lrn.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/quant_dot.hpp>

#include <migraphx/gpu/abs.hpp>
#include <migraphx/gpu/batch_norm_inference.hpp>
Paul's avatar
Paul committed
23
24
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/convolution.hpp>
kahmed10's avatar
kahmed10 committed
25
#include <migraphx/gpu/deconvolution.hpp>
26
#include <migraphx/gpu/device_name.hpp>
Khalique's avatar
Khalique committed
27
#include <migraphx/gpu/elu.hpp>
28
#include <migraphx/gpu/equal.hpp>
Paul's avatar
Paul committed
29
#include <migraphx/gpu/gemm.hpp>
30
#include <migraphx/gpu/greater.hpp>
31
#include <migraphx/gpu/int8_conv_pack.hpp>
32
#include <migraphx/gpu/leaky_relu.hpp>
33
#include <migraphx/gpu/less.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
34
35
36
#include <migraphx/gpu/logical_and.hpp>
#include <migraphx/gpu/logical_or.hpp>
#include <migraphx/gpu/logical_xor.hpp>
37
38
39
40
#include <migraphx/gpu/lrn.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/quant_convolution.hpp>
#include <migraphx/gpu/rocblas.hpp>
41
#include <migraphx/gpu/unary_not.hpp>
turneram's avatar
turneram committed
42
#include <migraphx/gpu/where.hpp>
43
#include <migraphx/gpu/compiler.hpp>
44
#include <migraphx/iterator_for.hpp>
45
#include <migraphx/program.hpp>
Paul's avatar
Paul committed
46
#include <utility>
47
#include <functional>
Khalique's avatar
Khalique committed
48
#include <algorithm>
Shucai Xiao's avatar
Shucai Xiao committed
49
#include <map>
Paul's avatar
Paul committed
50

Paul's avatar
Paul committed
51
namespace migraphx {
Paul's avatar
Paul committed
52
inline namespace MIGRAPHX_INLINE_NS {
Paul's avatar
Paul committed
53
namespace gpu {
Paul's avatar
Paul committed
54
55
56

// Applies the GPU lowering pass to one module: rewrites framework-level
// operators into their gpu::/miopen/rocblas counterparts and inserts the
// buffer allocations they require.
struct miopen_apply
{
    module* mod          = nullptr; // module being lowered (not owned)
    const lowering* pass = nullptr; // owning pass; supplies context and options (not owned)
    // op name -> handler that replaces one instruction and returns the replacement
    std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
    instruction_ref last{};
    bool offload_copy   = false; // insert host<->device copies for params/returns (main module only)
    bool int8_x4_format = true;  // rocBLAS int8 packing layout; queried in init()
    bool compute_fp32   = false; // use fp32 compute for gemm on supported archs (see init())

    // Returns the GPU context owned by the pass; both pointers must be set
    // before any handler runs.
    context& get_context() const
    {
        assert(pass != nullptr);
        assert(pass->ctx != nullptr);
        return *pass->ctx;
    }

Paul's avatar
Paul committed
72
73
74
75
76
77
78
    void check_shape(shape x, instruction_ref i)
    {
        assert(x == i->get_shape());
        (void)x;
        (void)i;
    }

79
80
81
82
83
84
    // Architectures on which fp32 compute is enabled for rocBLAS gemm.
    // The set is built once and shared (immutable after construction).
    const std::unordered_set<std::string>& get_rocblas_fp32_archs()
    {
        static const std::unordered_set<std::string> fp32_archs = {"gfx908", "gfx90a"};
        return fp32_archs;
    }

85
86
    // Populates apply_map with one handler per lowerable operator and probes
    // the rocBLAS/device configuration that those handlers depend on.
    void init()
    {
        assert(mod != nullptr);
        assert(pass != nullptr);

// rocBLAAS gained the int8 layout query and fp32-compute support at 2.38;
// older versions keep the defaults set in the member initializers.
#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
        auto& ctx              = get_context();
        // device name without the ":xnack..." style suffix
        const auto device_name = trim(split_string(get_device_name(), ':').front());
        if(contains(get_rocblas_fp32_archs(), device_name))
            compute_fp32 = true;
        // flag is written by the query call below
        rocblas_gemm_flags flag;
        rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
        int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
#endif

        // offload copies are only inserted for the top-level module
        offload_copy = (mod->name() == "main") ? pass->offload_copy : false;

        // ops with no fields: lowered to a default-constructed gpu:: op
        add_generic_op("acos");
        add_generic_op("acosh");
        add_generic_op("add");
        add_generic_op("asin");
        add_generic_op("asinh");
        add_generic_op("atan");
        add_generic_op("atanh");
        add_generic_op("ceil");
        add_generic_op("contiguous");
        add_generic_op("cos");
        add_generic_op("cosh");
        add_generic_op("div");
        add_generic_op("equal");
        add_generic_op("erf");
        add_generic_op("exp");
        add_generic_op("floor");
        add_generic_op("greater");
        add_generic_op("less");
        add_generic_op("log");
        add_generic_op("logical_and");
        add_generic_op("logical_or");
        add_generic_op("logical_xor");
        add_generic_op("max");
        add_generic_op("min");
        add_generic_op("mul");
        add_generic_op("not");
        add_generic_op("pow");
        add_generic_op("prelu");
        add_generic_op("recip");
        add_generic_op("relu");
        add_generic_op("round");
        add_generic_op("rsqrt");
        add_generic_op("sigmoid");
        add_generic_op("sign");
        add_generic_op("sin");
        add_generic_op("sinh");
        add_generic_op("sqdiff");
        add_generic_op("sqrt");
        add_generic_op("sub");
        add_generic_op("tan");
        add_generic_op("tanh");
        add_generic_op("where");

        // ops with fields: lowered to a gpu:: op constructed from the
        // original op's value (fields copied over)
        add_extend_op("abs");
        add_extend_op("argmax");
        add_extend_op("argmin");
        add_extend_op("clip");
        add_extend_op("concat");
        add_extend_op("convert");
        add_extend_op("elu");
        add_extend_op("gather");
        add_extend_op("leaky_relu");
        add_extend_op("logsoftmax");
        add_extend_op("lrn");
        add_extend_op("multinomial");
        add_extend_op("nonzero");
        add_extend_op("pad");
        add_extend_op("pooling");
        add_extend_op("prefix_scan_sum");
        add_extend_op("reverse");
        add_extend_op("rnn_var_sl_last_output");
        add_extend_op("rnn_var_sl_shift_output");
        add_extend_op("rnn_var_sl_shift_sequence");
        add_extend_op("scatter_none");
        add_extend_op("topk");

        // ops needing custom lowering (workspaces, host sync, submodules, ...)
        add_batch_norm_inference_op();
        add_convolution_op();
        add_deconvolution_op();
        add_gemm_op<op::dot>("dot");
        add_gemm_op<op::quant_dot>("quant_dot");
        add_if_op();
        add_loop_op();
        add_neg_op();
        add_nms_op();
        add_quant_convolution_op();
    }

180
    // When offload copy is enabled, inserts host->device copies after every
    // used @param and device->host copies before the module's outputs, so the
    // caller can pass and receive host buffers.
    void copy_params() const
    {
        if(not offload_copy)
            return;

        for(auto ins : iterator_for(*mod))
        {
            if(ins->name() != "@param")
                continue;

            // parameter has no outputs, no need to insert copy to gpu
            if(ins->outputs().empty())
                continue;

            // allocate a device buffer and copy the host parameter into it;
            // all uses of the parameter are redirected to the copy
            auto pos = std::next(ins);
            auto a   = insert_allocation(pos, ins->get_shape());
            auto c   = mod->insert_instruction(pos, make_op("hip::copy_to_gpu"), ins, a);
            mod->replace_instruction(ins, c);
        }

        // return instruction
        auto ret = std::prev(mod->end());
        if(ret->name() == "@return")
        {
            const auto& inputs = ret->inputs();

            // each input of ret needs to be copied from gpu to host, and the
            // return argument replaced with the copy's output
            for(const auto& in : inputs)
            {
                auto p_output = mod->insert_instruction(ret, make_op("hip::copy_from_gpu"), in);
                instruction::replace_argument(ret, in, p_output);
            }
        }
        // else branch to handle legacy programs without the return instruction
        else
        {
            mod->add_instruction(make_op("hip::copy_from_gpu"), ret);
        }
    }

Paul's avatar
Paul committed
221
222
    void apply()
    {
223
        init();
Shucai Xiao's avatar
Shucai Xiao committed
224
        for(auto it = mod->begin(); it != mod->end(); it++)
Paul's avatar
Paul committed
225
        {
Paul's avatar
Paul committed
226
            auto s = it->get_shape();
227
            if(apply_map.count(it->name()) > 0)
228
            {
229
                check_shape(s, apply_map.at(it->name())(it));
Paul's avatar
Paul committed
230
            }
231
232
233
234
            else if(has_compiler_for(it->name()))
            {
                check_shape(s, insert_precompile_op(it));
            }
Paul's avatar
Paul committed
235
        }
236

237
        copy_params();
Paul's avatar
Paul committed
238
239
    }

240
    // Wraps an instruction in gpu::precompile_op (compiled later by a JIT
    // compiler pass), appending an output buffer to its argument list.
    instruction_ref insert_precompile_op(instruction_ref ins) const
    {
        auto args = ins->inputs();
        args.push_back(insert_allocation(ins, ins->get_shape()));

        auto wrapped = make_op("gpu::precompile_op", {{"op", to_value(ins->get_operator())}});
        return mod->replace_instruction(ins, wrapped, args, ins->module_inputs());
    }

253
    // Inserts an "allocate" instruction of shape s just before ins and
    // returns it; later passes turn these into real device buffers.
    instruction_ref insert_allocation(instruction_ref ins, const shape& s) const
    {
        auto alloc = make_op("allocate", {{"shape", to_value(s)}});
        return mod->insert_instruction(ins, alloc);
    }

Shucai Xiao's avatar
Shucai Xiao committed
258
    void add_convolution_op()
Paul's avatar
Paul committed
259
    {
260
261
        apply_map.emplace("convolution", [=](instruction_ref ins) {
            auto&& op = any_cast<op::convolution>(ins->get_operator());
Paul's avatar
Paul committed
262

263
            auto conv = miopen_convolution{op, make_conv(op)};
264
            auto ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
Paul's avatar
Paul committed
265

266
            auto workspace = insert_allocation(ins, ws);
267
            auto output    = insert_allocation(ins, ins->get_shape());
kahmed10's avatar
kahmed10 committed
268

Shucai Xiao's avatar
Shucai Xiao committed
269
            return mod->replace_instruction(
kahmed10's avatar
kahmed10 committed
270
271
272
273
274
275
276
277
278
279
280
281
                ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
        });
    }

    void add_deconvolution_op()
    {
        apply_map.emplace("deconvolution", [=](instruction_ref ins) {
            auto&& op = any_cast<op::deconvolution>(ins->get_operator());

            auto conv = miopen_deconvolution{op, make_deconv(op)};
            auto ws   = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));

282
            auto workspace = insert_allocation(ins, ws);
kahmed10's avatar
kahmed10 committed
283
            auto output    = insert_allocation(ins, ins->get_shape());
Paul's avatar
Paul committed
284

Shucai Xiao's avatar
Shucai Xiao committed
285
            return mod->replace_instruction(
286
287
                ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
        });
Paul's avatar
Paul committed
288
289
    }

290
291
    template <typename Op>
    void add_gemm_op(const std::string& name)
292
293
    {
        apply_map.emplace(name, [=](instruction_ref ins) {
294
            std::vector<instruction_ref> refs = ins->inputs();
295
296
297
            assert(refs.size() == 2);
            auto output = insert_allocation(ins, ins->get_shape());
            refs.push_back(output);
Shucai Xiao's avatar
Shucai Xiao committed
298
            return mod->replace_instruction(
299
                ins, rocblas_gemm<Op>{Op{}, 1, 0, int8_x4_format, compute_fp32}, refs);
300
301
302
        });
    }

303
304
305
306
    // Lowers "quant_convolution" to miopen_quant_convolution. Compilation is
    // attempted with the probed int8 layout first; if MIOpen has no solver
    // for that layout, the opposite layout is tried before giving up.
    void add_quant_convolution_op()
    {
        apply_map.emplace("quant_convolution", [=](instruction_ref ins) {
            auto&& op = any_cast<op::quant_convolution>(ins->get_operator());
            // ws/conv are filled in by whichever compile attempt succeeds
            shape ws;
            miopen_quant_convolution conv;
            auto compile_quant_conv_with_format = [&](bool format) {
                conv = miopen_quant_convolution{op, format, make_conv(op)};
                ws   = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
            };

            try
            {
                compile_quant_conv_with_format(int8_x4_format);
            }
            catch(migraphx::exception&)
            {
                // In case no solver supports the default format, retry using the other format.
                compile_quant_conv_with_format(!int8_x4_format);
            }

            auto args      = ins->inputs();
            auto workspace = insert_allocation(ins, ws);
            auto output    = insert_allocation(ins, ins->get_shape());

            return mod->replace_instruction(ins, conv, args[0], args[1], workspace, output);
        });
    }

332
333
334
    // add_generic_op just constructs the operator with no fields whereas add_extend_op copies over
    // the fields Since it doesn't have fields its default constructed

335
336
337
    void add_generic_op(const std::string& name) { add_generic_op(name, "gpu::" + name); }

    void add_generic_op(const std::string& op_name, const std::string& gpu_name)
Paul's avatar
Paul committed
338
    {
339
        apply_map.emplace(op_name, [=](instruction_ref ins) {
340
341
342
            auto output                       = insert_allocation(ins, ins->get_shape());
            std::vector<instruction_ref> refs = ins->inputs();
            refs.push_back(output);
Paul's avatar
Paul committed
343

Shucai Xiao's avatar
Shucai Xiao committed
344
            return mod->replace_instruction(ins, make_op(gpu_name), refs);
345
        });
Paul's avatar
Paul committed
346
    }
Paul's avatar
Paul committed
347

348
349
350
    void add_extend_op(const std::string& name) { add_extend_op(name, "gpu::" + name); }

    void add_extend_op(const std::string& op_name, const std::string& gpu_name)
Khalique's avatar
Khalique committed
351
    {
352
353
        apply_map.emplace(op_name, [=](instruction_ref ins) {
            auto&& op                         = ins->get_operator();
354
355
356
            auto output                       = insert_allocation(ins, ins->get_shape());
            std::vector<instruction_ref> refs = ins->inputs();
            refs.push_back(output);
Paul's avatar
Paul committed
357

Shucai Xiao's avatar
Shucai Xiao committed
358
            return mod->replace_instruction(ins, make_op(gpu_name, op.to_value()), refs);
359
        });
Khalique's avatar
Khalique committed
360
361
    }

Shucai Xiao's avatar
Shucai Xiao committed
362
    // Lowers "batch_norm_inference" to miopen_batch_norm_inference. MIOpen
    // expects the scale/bias/mean/variance tensors with the same rank as the
    // input, so each of them is reshaped (rank padded with 1s) first.
    void add_batch_norm_inference_op()
    {
        apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) {
            auto&& op       = any_cast<op::batch_norm_inference>(ins->get_operator());
            auto output     = insert_allocation(ins, ins->get_shape());
            // shape of the scale input; its element count sizes the reshape
            shape old_shape = ins->inputs().at(1)->get_shape();
            auto input      = ins->inputs()[0];
            auto input_lens = input->get_shape().lens();
            // target lens: same rank as input, all dimensions 1 by default
            std::vector<int64_t> rsp_lens(input_lens.size(), 1);
            // for per_activation case, also need to reshape input
            if(op.bn_mode == op::batch_norm_inference::per_activation)
            {
                // keep every non-batch dimension of the input
                std::copy(input_lens.begin() + 1, input_lens.end(), rsp_lens.begin() + 1);
            }
            else
            {
                // spatial mode: stats are per-channel, so only dim 1 is set
                rsp_lens[1] = static_cast<int64_t>(old_shape.elements());
            }

            auto reshape_op = op::reshape{rsp_lens};
            // reshape inputs 1..4 (scale, bias, mean, variance)
            std::vector<instruction_ref> reshapes;
            std::transform(ins->inputs().begin() + 1,
                           ins->inputs().end(),
                           std::back_inserter(reshapes),
                           [&](auto i) { return mod->insert_instruction(ins, reshape_op, i); });

            return mod->replace_instruction(ins,
                                            miopen_batch_norm_inference{op},
                                            input,
                                            reshapes[0],
                                            reshapes[1],
                                            reshapes[2],
                                            reshapes[3],
                                            output);
        });
    }
Shucai Xiao's avatar
Shucai Xiao committed
398
399
400
401
402
403
404

    // use 0 - input to represent neg
    void add_neg_op()
    {
        apply_map.emplace("neg", [=](instruction_ref ins) {
            auto s = ins->get_shape();
            std::vector<float> zeros(s.elements(), 0.0f);
Shucai Xiao's avatar
Shucai Xiao committed
405
            auto l0     = mod->add_literal(literal(s, zeros));
Shucai Xiao's avatar
Shucai Xiao committed
406
            auto output = insert_allocation(ins, s);
Shucai Xiao's avatar
Shucai Xiao committed
407
            return mod->replace_instruction(
408
                ins, make_op("gpu::sub"), l0, ins->inputs().front(), output);
Shucai Xiao's avatar
Shucai Xiao committed
409
410
        });
    }
Shucai Xiao's avatar
Shucai Xiao committed
411

Shucai Xiao's avatar
Shucai Xiao committed
412
    // add input and output argument for the if operator
Shucai Xiao's avatar
Shucai Xiao committed
413
414
415
416
    void add_if_op()
    {
        apply_map.emplace("if", [=](instruction_ref ins) {
            std::vector<instruction_ref> inputs = ins->inputs();
417
418
419
            auto cpu_cond =
                mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), inputs.front());
            auto sync_cond = mod->insert_instruction(ins, make_op("hip::sync_stream"), cpu_cond);
Shucai Xiao's avatar
Shucai Xiao committed
420
421
            inputs.front() = sync_cond;

422
            return mod->replace_instruction(ins, ins->get_operator(), inputs, ins->module_inputs());
Shucai Xiao's avatar
Shucai Xiao committed
423
424
        });
    }
Shucai Xiao's avatar
Shucai Xiao committed
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440

    // replace the loop operator with the gpu::loop operator; the argument list
    // becomes: [synced max_iter, host cond, original inputs..., one scratch
    // allocation per input, cond output, loop output]
    void add_loop_op()
    {
        apply_map.emplace("loop", [=](instruction_ref ins) {
            std::vector<instruction_ref> inputs = ins->inputs();
            // copy max_iter from gpu to cpu
            auto cpu_max_iter =
                mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), inputs.at(0));
            auto cpu_cond =
                mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), inputs.at(1));
            // sync so both host copies are complete before the loop runs
            auto synced_max_iter =
                mod->insert_instruction(ins, make_op("hip::sync_stream"), cpu_max_iter, cpu_cond);
            inputs.at(0)     = synced_max_iter;
            inputs.at(1)     = cpu_cond;
            auto copy_inputs = inputs;
            // append one device allocation per input (scratch for loop state)
            std::transform(copy_inputs.begin(),
                           copy_inputs.end(),
                           std::back_inserter(inputs),
                           [&](auto in) { return insert_allocation(ins, in->get_shape()); });

            auto mod_args = ins->module_inputs();
            auto output   = insert_allocation(ins, ins->get_shape());

            // condition output buffer shaped by the loop body's first output
            const auto* sub_mod = mod_args.front();
            auto cond_out       = insert_allocation(ins, sub_mod->get_output_shapes().front());

            // add cond and mod outputs to the argument list
            inputs.push_back(cond_out);
            inputs.push_back(output);

            return mod->replace_instruction(
                ins, make_op("gpu::loop", ins->get_operator().to_value()), inputs, mod_args);
        });
    }
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479

    void add_nms_op()
    {
        apply_map.emplace("nonmaxsuppression", [=](instruction_ref ins) {
            auto s      = ins->get_shape();
            auto output = insert_allocation(ins, s);
            std::vector<instruction_ref> cpu_inputs;
            auto inputs = ins->inputs();
            std::transform(
                inputs.begin(), inputs.end(), std::back_inserter(cpu_inputs), [&](auto in) {
                    return mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), in);
                });
            cpu_inputs.front() =
                mod->insert_instruction(ins, make_op("hip::sync_stream"), cpu_inputs);
            auto cpu_out = mod->insert_instruction(ins, ins->get_operator(), cpu_inputs);
            auto gpu_out =
                mod->insert_instruction(ins, make_op("hip::copy_to_gpu"), cpu_out, output);
            return mod->replace_instruction(ins, gpu_out);
        });
    }
Paul's avatar
Paul committed
480
481
};

Shucai Xiao's avatar
Shucai Xiao committed
482
// Entry point of the pass: lower every instruction of the module.
void lowering::apply(module& m) const
{
    miopen_apply applier{&m, this};
    applier.apply();
}
Shucai Xiao's avatar
Shucai Xiao committed
483

Paul's avatar
Paul committed
484
} // namespace gpu
Paul's avatar
Paul committed
485
} // namespace MIGRAPHX_INLINE_NS
Paul's avatar
Paul committed
486
} // namespace migraphx