// lowering.cpp

#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>

#include <migraphx/op/abs.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/deconvolution.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/elu.hpp>
#include <migraphx/op/if_op.hpp>
#include <migraphx/op/leaky_relu.hpp>
#include <migraphx/op/lrn.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/quant_dot.hpp>

#include <migraphx/gpu/abs.hpp>
#include <migraphx/gpu/batch_norm_inference.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/gpu/deconvolution.hpp>
#include <migraphx/gpu/elu.hpp>
#include <migraphx/gpu/equal.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/gpu/greater.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp>
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/less.hpp>
#include <migraphx/gpu/logical_and.hpp>
#include <migraphx/gpu/logical_or.hpp>
#include <migraphx/gpu/logical_xor.hpp>
#include <migraphx/gpu/lrn.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/quant_convolution.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/unary_not.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/program.hpp>
#include <utility>
#include <functional>
#include <algorithm>
#include <map>
#include <string>
#include <unordered_map>
#include <vector>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

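// Applies GPU lowering to a module: builds a table of per-operator rewrite
// callbacks, replaces each supported instruction with its gpu:: / MIOpen /
// rocBLAS implementation, and inserts the buffer allocations (and host<->device
// copies when offload_copy is enabled) that those implementations require.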
struct miopen_apply
{
    module* mod          = nullptr;
    const lowering* pass = nullptr;
    std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
    instruction_ref last{};
    std::unordered_map<instruction_ref, std::string> prog_output_names{};
    bool offload_copy   = false;
    bool int8_x4_format = true;

    context& get_context() const
    {
        assert(pass != nullptr);
        assert(pass->ctx != nullptr);
        return *pass->ctx;
    }

    void check_shape(shape x, instruction_ref i)
    {
        assert(x == i->get_shape());
        (void)x;
        (void)i;
    }

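    // When the module ends in @return, give each program output a stable parameter
    // name (<module>:#output_<n>); insert_allocation uses these names so that,
    // without offload_copy, results are written directly into caller-provided buffers.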
    void create_output_names()
    {
        this->last = instruction::get_output_alias(std::prev(mod->end()));
        if(this->last->name() == "@return")
        {
            const auto& prog_outputs = last->inputs();
            std::vector<instruction_ref> outputs_alias(prog_outputs.size());

            std::transform(prog_outputs.begin(),
                           prog_outputs.end(),
                           outputs_alias.begin(),
                           [](const auto& i) { return instruction::get_output_alias(i); });

            std::size_t index = 0;
            for(auto ins : outputs_alias)
            {
                prog_output_names[ins] = mod->name() + ":#output_" + std::to_string(index++);
            }
        }
    }

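    // Populate apply_map with a lowering callback for every supported operator and,
    // with a new enough rocBLAS, query the int8 layout used by the GEMM lowering.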
    void init()
    {
        assert(mod != nullptr);
        assert(pass != nullptr);

#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
        auto& ctx = get_context();
        rocblas_gemm_flags flag;
        rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
        int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
#endif

        offload_copy = (mod->name() == "main") ? pass->offload_copy : false;
        create_output_names();

        add_generic_op("acos");
        add_generic_op("acosh");
        add_generic_op("add");
        add_generic_op("asin");
        add_generic_op("asinh");
        add_generic_op("atan");
        add_generic_op("atanh");
        add_generic_op("ceil");
        add_generic_op("contiguous");
        add_generic_op("cos");
        add_generic_op("cosh");
        add_generic_op("div");
123
        add_generic_op("equal");
124
125
126
        add_generic_op("erf");
        add_generic_op("exp");
        add_generic_op("floor");
127
128
        add_generic_op("greater");
        add_generic_op("less");
129
        add_generic_op("log");
Shucai Xiao's avatar
Shucai Xiao committed
130
131
132
        add_generic_op("logical_and");
        add_generic_op("logical_or");
        add_generic_op("logical_xor");
133
134
135
        add_generic_op("max");
        add_generic_op("min");
        add_generic_op("mul");
136
        add_generic_op("not");
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
        add_generic_op("pow");
        add_generic_op("prelu");
        add_generic_op("recip");
        add_generic_op("relu");
        add_generic_op("round");
        add_generic_op("rsqrt");
        add_generic_op("sigmoid");
        add_generic_op("sign");
        add_generic_op("sin");
        add_generic_op("sinh");
        add_generic_op("sqdiff");
        add_generic_op("sqrt");
        add_generic_op("sub");
        add_generic_op("tan");
        add_generic_op("tanh");

        add_extend_op("abs");
        add_extend_op("argmax");
        add_extend_op("argmin");
        add_extend_op("clip");
        add_extend_op("concat");
        add_extend_op("convert");
        add_extend_op("elu");
        add_extend_op("gather");
        add_extend_op("leaky_relu");
        add_extend_op("logsoftmax");
        add_extend_op("lrn");
        add_extend_op("pad");
        add_extend_op("pooling");
        add_extend_op("reduce_max");
        add_extend_op("reduce_mean");
        add_extend_op("reduce_min");
        add_extend_op("reduce_prod");
        add_extend_op("reduce_sum");
        add_extend_op("reverse");
        add_extend_op("rnn_var_sl_last_output");
        add_extend_op("rnn_var_sl_shift_output");
        add_extend_op("rnn_var_sl_shift_sequence");
        add_extend_op("scatter");
        add_extend_op("softmax");

        add_gemm_op<op::dot>("dot");
        add_gemm_op<op::quant_dot>("quant_dot");
        add_convolution_op();
        add_deconvolution_op();
        add_quant_convolution_op();
        add_batch_norm_inference_op();
        add_neg_op();
        add_if_op();
    }

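    // With offload_copy enabled, wrap every @param in a host-to-device copy and
    // copy the module outputs back from the device before @return.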
    void copy_params()
    {
        if(not offload_copy)
            return;

        for(auto ins : iterator_for(*mod))
        {
            if(ins->name() != "@param")
                continue;

            auto pos = std::next(ins);
            auto a   = insert_allocation(pos, ins->get_shape());
            auto c   = mod->insert_instruction(pos, make_op("hip::copy_to_gpu"), ins, a);
            mod->replace_instruction(ins, c);
        }

        // copy outputs back to the host at the return instruction
        auto ret = std::prev(mod->end());
        if(ret->name() == "@return")
        {
            const auto& inputs = ret->inputs();

            // each input of ret needs to be copied from gpu to host, and its use in
            // ret replaced with the copy's output
            for(const auto& in : inputs)
            {
                auto p_output = mod->insert_instruction(ret, make_op("hip::copy_from_gpu"), in);
                instruction::replace_argument(ret, in, p_output);
            }
        }
        // else branch handles legacy programs without a return instruction
        else
        {
            mod->add_instruction(make_op("hip::copy_from_gpu"), ret);
        }
    }

    void apply()
    {
        init();
        for(auto it = mod->begin(); it != mod->end(); it++)
        {
            auto s = it->get_shape();
            if(apply_map.count(it->name()) > 0)
            {
                check_shape(s, apply_map.at(it->name())(it));
            }
        }

        copy_params();
    }

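    // Allocate a device buffer for an instruction's result. Without offload_copy,
    // buffers that feed the module outputs become named parameters instead, so the
    // caller owns the output memory.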
    instruction_ref insert_allocation(instruction_ref ins, const shape& s, std::string tag = "")
    {
        if(offload_copy)
        {
            auto result = mod->insert_instruction(
                ins, make_op("hip::allocate", {{"shape", to_value(s)}, {"tag", std::move(tag)}}));
            return result;
        }

        // If the instruction's output is an input of the @return instruction, write
        // directly into the corresponding output parameter
        auto ins_alias = instruction::get_output_alias(ins);
        if(last->name() == "@return" and tag.empty() and prog_output_names.count(ins_alias) > 0)
        {
            return mod->add_parameter(prog_output_names[ins_alias], s);
        }
        else if(ins == last and tag.empty())
        {
            return mod->add_parameter("output", s);
        }

        return mod->insert_instruction(
            ins, make_op("hip::allocate", {{"shape", to_value(s)}, {"tag", std::move(tag)}}));
    }

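    // Lower "convolution" to a MIOpen convolution: find() selects the algorithm and
    // reports the workspace shape, which is allocated alongside the output buffer.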
    void add_convolution_op()
    {
        apply_map.emplace("convolution", [=](instruction_ref ins) {
            auto&& op = any_cast<op::convolution>(ins->get_operator());

            auto conv = miopen_convolution{op, make_conv(op)};
            auto ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));

            auto workspace = insert_allocation(ins, ws, "workspace");
            auto output    = insert_allocation(ins, ins->get_shape());

            return mod->replace_instruction(
                ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
        });
    }

    void add_deconvolution_op()
    {
        apply_map.emplace("deconvolution", [=](instruction_ref ins) {
            auto&& op = any_cast<op::deconvolution>(ins->get_operator());

            auto conv = miopen_deconvolution{op, make_deconv(op)};
            auto ws   = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));

            auto workspace = insert_allocation(ins, ws, "workspace");
            auto output    = insert_allocation(ins, ins->get_shape());

            return mod->replace_instruction(
                ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
        });
    }

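    // Lower dot/quant_dot to rocblas_gemm. A two-input GEMM gets a fresh output
    // buffer with beta forced to 0; a three-input GEMM writes its result into the C
    // operand (copying it first when it cannot be safely overwritten) so beta can be
    // applied in place.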
    template <class Op>
    void add_gemm_op(std::string name)
    {
        apply_map.emplace(name, [=](instruction_ref ins) {
            auto&& op                         = any_cast<Op>(ins->get_operator());
            auto beta                         = op.beta;
            std::vector<instruction_ref> refs = ins->inputs();
            if(refs.size() == 2)
            {
                auto output = insert_allocation(ins, ins->get_shape());
                beta        = 0;
                refs.push_back(output);
            }
            else
            {
                auto c_alias = instruction::get_output_alias(refs.back());
                if(ins == last or refs.back()->outputs().size() > 1 or c_alias->inputs().empty())
                {
                    auto output = insert_allocation(ins, ins->get_shape());
                    auto copy_out =
                        mod->insert_instruction(ins, make_op("hip::copy"), refs.back(), output);
                    refs.back() = copy_out;
                    refs.push_back(copy_out);
                }
                else
                {
                    refs.push_back(refs.back());
                }
            }

            return mod->replace_instruction(
                ins, rocblas_gemm<Op>{Op{op.alpha, beta}, int8_x4_format}, refs);
        });
    }

    void add_quant_convolution_op()
    {
        apply_map.emplace("quant_convolution", [=](instruction_ref ins) {
            auto&& op = any_cast<op::quant_convolution>(ins->get_operator());
            auto conv = miopen_quant_convolution{op, make_conv(op)};
            auto ws   = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));

            auto args      = ins->inputs();
            auto workspace = insert_allocation(ins, ws, "workspace");
            auto output    = insert_allocation(ins, ins->get_shape());

            return mod->replace_instruction(ins, conv, args[0], args[1], workspace, output);
        });
    }

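    // Generic lowering: map op_name to the gpu_name operator, forwarding the
    // original inputs plus a trailing output allocation.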
    void add_generic_op(const std::string& name) { add_generic_op(name, "gpu::" + name); }

    void add_generic_op(const std::string& op_name, const std::string& gpu_name)
    {
        apply_map.emplace(op_name, [=](instruction_ref ins) {
            auto output                       = insert_allocation(ins, ins->get_shape());
            std::vector<instruction_ref> refs = ins->inputs();
            refs.push_back(output);

            return mod->replace_instruction(ins, make_op(gpu_name), refs);
        });
    }

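    // Like add_generic_op, but also forwards the operator's attributes via
    // op.to_value() so the GPU operator is constructed with the same settings.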
    void add_extend_op(const std::string& name) { add_extend_op(name, "gpu::" + name); }

    void add_extend_op(const std::string& op_name, const std::string& gpu_name)
    {
        apply_map.emplace(op_name, [=](instruction_ref ins) {
            auto&& op                         = ins->get_operator();
            auto output                       = insert_allocation(ins, ins->get_shape());
            std::vector<instruction_ref> refs = ins->inputs();
            refs.push_back(output);

            return mod->replace_instruction(ins, make_op(gpu_name, op.to_value()), refs);
        });
    }

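    // Lower batch_norm_inference to miopen_batch_norm_inference, reshaping the
    // scale/bias/mean/variance inputs according to bn_mode.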
    void add_batch_norm_inference_op()
    {
        apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) {
            auto&& op       = any_cast<op::batch_norm_inference>(ins->get_operator());
            auto output     = insert_allocation(ins, ins->get_shape());
            shape old_shape = ins->inputs().at(1)->get_shape();
            auto input      = ins->inputs()[0];
            auto input_lens = input->get_shape().lens();
            std::vector<int64_t> rsp_lens(input_lens.size(), 1);
            // in per_activation mode the arguments span all activation dimensions;
            // otherwise they only span the channel dimension
            if(op.bn_mode == op::batch_norm_inference::per_activation)
            {
                std::copy(input_lens.begin() + 1, input_lens.end(), rsp_lens.begin() + 1);
            }
            else
            {
                rsp_lens[1] = static_cast<int64_t>(old_shape.elements());
            }

            auto reshape_op = op::reshape{rsp_lens};
            std::vector<instruction_ref> reshapes;
            std::transform(ins->inputs().begin() + 1,
                           ins->inputs().end(),
                           std::back_inserter(reshapes),
                           [&](auto i) { return mod->insert_instruction(ins, reshape_op, i); });

            return mod->replace_instruction(ins,
                                            miopen_batch_norm_inference{op},
                                            input,
                                            reshapes[0],
                                            reshapes[1],
                                            reshapes[2],
                                            reshapes[3],
                                            output);
        });
    }

    // use 0 - input to represent neg
    void add_neg_op()
    {
        apply_map.emplace("neg", [=](instruction_ref ins) {
            auto s = ins->get_shape();
            std::vector<float> zeros(s.elements(), 0.0f);
            auto l0     = mod->add_literal(literal(s, zeros));
            auto output = insert_allocation(ins, s);
            return mod->replace_instruction(
                ins, make_op("gpu::sub"), l0, ins->inputs().front(), output);
        });
    }

    // lower the if operator: copy the condition back to the host (with a stream
    // sync) and append output allocations for the sub-module parameters
    void add_if_op()
    {
        apply_map.emplace("if", [=](instruction_ref ins) {
            std::vector<instruction_ref> inputs = ins->inputs();
            auto cpu_cond =
                mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), inputs.front());
            auto sync_cond = mod->insert_instruction(ins, make_op("hip::sync_stream"), cpu_cond);
            inputs.front() = sync_cond;

            std::vector<module_ref> mod_args = ins->module_inputs();
            std::map<std::string, shape> name_shapes;
            for(const auto& smod : mod_args)
            {
                auto ps = smod->get_parameter_shapes();
                name_shapes.insert(ps.begin(), ps.end());
            }

            bool ins_output_allocated = false;
            for(auto& pn : name_shapes)
            {
                const auto& s = pn.second;
                instruction_ref output{};
                if(s == ins->get_shape() and not ins_output_allocated)
                {
                    output               = insert_allocation(ins, s);
                    ins_output_allocated = true;
                }
                else
                {
                    output = mod->insert_instruction(
                        ins, make_op("hip::allocate", {{"shape", to_value(s)}}));
                }
                inputs.push_back(output);
            }

            return mod->replace_instruction(ins, ins->get_operator(), inputs, mod_args);
        });
    }
};

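// Entry point of the lowering pass for a single module.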
void lowering::apply(module& m) const { miopen_apply{&m, this}.apply(); }

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx