/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <iterator>
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>

#include <migraphx/op/abs.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/deconvolution.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/elu.hpp>
#include <migraphx/op/if_op.hpp>
#include <migraphx/op/leaky_relu.hpp>
#include <migraphx/op/lrn.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/quant_dot.hpp>

#include <migraphx/gpu/abs.hpp>
#include <migraphx/gpu/batch_norm_inference.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/gpu/deconvolution.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/gpu/elu.hpp>
#include <migraphx/gpu/equal.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/gpu/greater.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp>
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/less.hpp>
#include <migraphx/gpu/logical_and.hpp>
#include <migraphx/gpu/logical_or.hpp>
#include <migraphx/gpu/logical_xor.hpp>
#include <migraphx/gpu/lrn.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/quant_convolution.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/unary_not.hpp>
#include <migraphx/gpu/where.hpp>
#include <migraphx/gpu/compiler.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/program.hpp>
#include <utility>
#include <functional>
#include <algorithm>
#include <map>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
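// miopen_apply walks a module and rewrites each supported instruction into its GPU
// implementation: it looks up a handler in apply_map (populated by init()), inserts the
// output/workspace allocations the GPU ops expect, and falls back to gpu::precompile_op
// for operators that have a registered compiler.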

struct miopen_apply
{
    module* mod          = nullptr;
    const lowering* pass = nullptr;
    std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
    instruction_ref last{};
    bool offload_copy   = false;
    bool int8_x4_format = true;
    bool compute_fp32   = false;

    context& get_context() const
    {
        assert(pass != nullptr);
        assert(pass->ctx != nullptr);
        return *pass->ctx;
    }

    void check_shape(shape x, instruction_ref i)
    {
        assert(x == i->get_shape());
        (void)x;
        (void)i;
    }

    const std::unordered_set<std::string>& get_rocblas_fp32_archs()
    {
        static std::unordered_set<std::string> supported_archs{"gfx908", "gfx90a"};
        return supported_archs;
    }

    void init()
    {
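        // Record the device / rocBLAS capabilities used by the GEMM and int8 convolution
        // lowerings below, then register a lowering handler for every supported operator.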
        assert(mod != nullptr);
        assert(pass != nullptr);

#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
        auto& ctx              = get_context();
        const auto device_name = trim(split_string(get_device_name(), ':').front());
        if(contains(get_rocblas_fp32_archs(), device_name))
            compute_fp32 = true;
        rocblas_gemm_flags flag;
        rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
        int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
#endif

        offload_copy = (mod->name() == "main") ? pass->offload_copy : false;

        add_generic_op("acos");
        add_generic_op("acosh");
        add_generic_op("add");
        add_generic_op("asin");
        add_generic_op("asinh");
        add_generic_op("atan");
        add_generic_op("atanh");
        add_generic_op("ceil");
        add_generic_op("contiguous");
        add_generic_op("cos");
        add_generic_op("cosh");
        add_generic_op("div");
        add_generic_op("equal");
        add_generic_op("erf");
        add_generic_op("exp");
        add_generic_op("floor");
        add_generic_op("greater");
        add_generic_op("less");
        add_generic_op("log");
        add_generic_op("logical_and");
        add_generic_op("logical_or");
        add_generic_op("logical_xor");
        add_generic_op("max");
        add_generic_op("min");
        add_generic_op("mul");
        add_generic_op("not");
        add_generic_op("pow");
        add_generic_op("prelu");
        add_generic_op("recip");
        add_generic_op("relu");
        add_generic_op("round");
        add_generic_op("rsqrt");
        add_generic_op("sigmoid");
        add_generic_op("sign");
        add_generic_op("sin");
        add_generic_op("sinh");
        add_generic_op("sqdiff");
        add_generic_op("sqrt");
        add_generic_op("sub");
        add_generic_op("tan");
        add_generic_op("tanh");
        add_generic_op("where");

        add_extend_op("abs");
        add_extend_op("argmax");
        add_extend_op("argmin");
        add_extend_op("clip");
        add_extend_op("convert");
        add_extend_op("elu");
        add_extend_op("gather");
        add_extend_op("leaky_relu");
        add_extend_op("logsoftmax");
        add_extend_op("lrn");
        add_extend_op("multinomial");
        add_extend_op("nonzero");
        add_extend_op("pad");
        add_extend_op("pooling");
        add_extend_op("prefix_scan_sum");
        add_extend_op("reverse");
        add_extend_op("rnn_var_sl_last_output");
        add_extend_op("rnn_var_sl_shift_output");
        add_extend_op("rnn_var_sl_shift_sequence");
        add_extend_op("scatter_none");
        add_extend_op("topk");

        add_batch_norm_inference_op();
        add_convolution_op();
        add_deconvolution_op();
        add_gemm_op<op::dot>("dot");
        add_gemm_op<op::quant_dot>("quant_dot");
        add_if_op();
        add_loop_op();
        add_neg_op();
        add_nms_op();
        add_quant_convolution_op();
    }

    void copy_params() const
    {
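        // With offload_copy enabled, parameters live on the host: wrap every used @param in a
        // hip::copy_to_gpu and copy the program outputs back with hip::copy_from_gpu.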
        if(not offload_copy)
            return;

        for(auto ins : iterator_for(*mod))
        {
            if(ins->name() != "@param")
                continue;

            // the parameter has no outputs, so there is no need to insert a copy to the gpu
            if(ins->outputs().empty())
                continue;

            auto pos = std::next(ins);
            auto a   = insert_allocation(pos, ins->get_shape());
            auto c   = mod->insert_instruction(pos, make_op("hip::copy_to_gpu"), ins, a);
            mod->replace_instruction(ins, c);
        }

        // return instruction
        auto ret = std::prev(mod->end());
        if(ret->name() == "@return")
        {
            const auto& inputs = ret->inputs();

            // each input of ret needs to be copied from the gpu to the host, and the return's
            // argument replaced with the copy's output
            for(const auto& in : inputs)
            {
                auto p_output = mod->insert_instruction(ret, make_op("hip::copy_from_gpu"), in);
                instruction::replace_argument(ret, in, p_output);
            }
        }
        // else branch handles a legacy program without a return instruction
        else
        {
            mod->add_instruction(make_op("hip::copy_from_gpu"), ret);
        }
    }

    void apply()
    {
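        // Lower each instruction through its registered handler; anything else that has a
        // compiler registered goes through the precompile path.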
        init();
        for(auto it = mod->begin(); it != mod->end(); it++)
        {
            auto s = it->get_shape();
            if(apply_map.count(it->name()) > 0)
            {
                check_shape(s, apply_map.at(it->name())(it));
            }
            else if(has_compiler_for(it->name()))
            {
                check_shape(s, insert_precompile_op(it));
            }
        }

        copy_params();
    }

    instruction_ref insert_precompile_op(instruction_ref ins) const
    {
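        // Wrap the original operator in gpu::precompile_op with an extra output buffer
        // appended; the actual kernel is presumably generated by a later compilation pass.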
        auto output                       = insert_allocation(ins, ins->get_shape());
        std::vector<instruction_ref> refs = ins->inputs();
        refs.push_back(output);

        return mod->replace_instruction(
            ins,
            make_op("gpu::precompile_op", {{"op", to_value(ins->get_operator())}}),
            refs,
            ins->module_inputs());
    }

    instruction_ref insert_allocation(instruction_ref ins, const shape& s) const
    {
        return mod->insert_instruction(ins, make_op("allocate", {{"shape", to_value(s)}}));
    }

    void add_convolution_op()
    {
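        // Ask MIOpen for a convolution solution to learn the workspace size, then allocate the
        // workspace and output buffers that miopen_convolution takes as extra arguments.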
        apply_map.emplace("convolution", [=](instruction_ref ins) {
            auto&& op = any_cast<op::convolution>(ins->get_operator());

            auto conv = miopen_convolution{op, make_conv(op)};
            auto ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));

            auto workspace = insert_allocation(ins, ws);
            auto output    = insert_allocation(ins, ins->get_shape());

            return mod->replace_instruction(
                ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
        });
    }

    void add_deconvolution_op()
    {
        apply_map.emplace("deconvolution", [=](instruction_ref ins) {
            auto&& op = any_cast<op::deconvolution>(ins->get_operator());

            auto conv = miopen_deconvolution{op, make_deconv(op)};
            auto ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));

            auto workspace = insert_allocation(ins, ws);
            auto output    = insert_allocation(ins, ins->get_shape());

            return mod->replace_instruction(
                ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
        });
    }

    template <typename Op>
    void add_gemm_op(const std::string& name)
    {
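        // The GEMM writes into an allocation appended as the last argument; the 1 and 0 passed
        // to rocblas_gemm are presumably the alpha/beta scaling factors.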
        apply_map.emplace(name, [=](instruction_ref ins) {
            std::vector<instruction_ref> refs = ins->inputs();
            assert(refs.size() == 2);
            auto output = insert_allocation(ins, ins->get_shape());
            refs.push_back(output);
            return mod->replace_instruction(
                ins, rocblas_gemm<Op>{Op{}, 1, 0, int8_x4_format, compute_fp32}, refs);
        });
    }

    void add_quant_convolution_op()
    {
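        // int8 convolutions depend on the int8x4 packed format detected in init(); try that
        // format first and fall back to the other one if no solver supports it.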
        apply_map.emplace("quant_convolution", [=](instruction_ref ins) {
            auto&& op = any_cast<op::quant_convolution>(ins->get_operator());
            shape ws;
            miopen_quant_convolution conv;
            auto compile_quant_conv_with_format = [&](bool format) {
                conv = miopen_quant_convolution{op, format, make_conv(op)};
                ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
            };

            try
            {
                compile_quant_conv_with_format(int8_x4_format);
            }
            catch(migraphx::exception&)
            {
                // In case no solver supports the default format, retry using the other format.
                compile_quant_conv_with_format(!int8_x4_format);
            }

            auto args      = ins->inputs();
            auto workspace = insert_allocation(ins, ws);
            auto output    = insert_allocation(ins, ins->get_shape());

            return mod->replace_instruction(ins, conv, args[0], args[1], workspace, output);
        });
    }

    // add_generic_op just constructs the GPU operator with no fields, whereas add_extend_op
    // copies the fields over. Since a generic op has no fields, it is default constructed.
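    // For example (illustrative): "add" is registered with add_generic_op, so it lowers to
    // make_op("gpu::add") with no attributes, while "leaky_relu" is registered with
    // add_extend_op, so its attributes (e.g. alpha) are forwarded via
    // make_op("gpu::leaky_relu", op.to_value()).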

    void add_generic_op(const std::string& name) { add_generic_op(name, "gpu::" + name); }

    void add_generic_op(const std::string& op_name, const std::string& gpu_name)
    {
        apply_map.emplace(op_name, [=](instruction_ref ins) {
            auto output                       = insert_allocation(ins, ins->get_shape());
            std::vector<instruction_ref> refs = ins->inputs();
            refs.push_back(output);

            return mod->replace_instruction(ins, make_op(gpu_name), refs);
        });
    }

    void add_extend_op(const std::string& name) { add_extend_op(name, "gpu::" + name); }

    void add_extend_op(const std::string& op_name, const std::string& gpu_name)
    {
        apply_map.emplace(op_name, [=](instruction_ref ins) {
            auto&& op                         = ins->get_operator();
            auto output                       = insert_allocation(ins, ins->get_shape());
            std::vector<instruction_ref> refs = ins->inputs();
            refs.push_back(output);

            return mod->replace_instruction(ins, make_op(gpu_name, op.to_value()), refs);
        });
    }

    void add_batch_norm_inference_op()
    {
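        // The scale/bias/mean/variance inputs are reshaped so their layout lines up with what
        // miopen_batch_norm_inference expects; the target shape depends on bn_mode.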
        apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) {
            auto&& op       = any_cast<op::batch_norm_inference>(ins->get_operator());
            auto output     = insert_allocation(ins, ins->get_shape());
            shape old_shape = ins->inputs().at(1)->get_shape();
            auto input      = ins->inputs()[0];
            auto input_lens = input->get_shape().lens();
            std::vector<int64_t> rsp_lens(input_lens.size(), 1);
            // for the per_activation case, keep all of the input's non-batch dimensions in the reshape target
            if(op.bn_mode == op::batch_norm_inference::per_activation)
            {
                std::copy(input_lens.begin() + 1, input_lens.end(), rsp_lens.begin() + 1);
            }
            else
            {
                rsp_lens[1] = static_cast<int64_t>(old_shape.elements());
            }

            auto reshape_op = op::reshape{rsp_lens};
            std::vector<instruction_ref> reshapes;
            std::transform(ins->inputs().begin() + 1,
                           ins->inputs().end(),
                           std::back_inserter(reshapes),
                           [&](auto i) { return mod->insert_instruction(ins, reshape_op, i); });

            return mod->replace_instruction(ins,
                                            miopen_batch_norm_inference{op},
                                            input,
                                            reshapes[0],
                                            reshapes[1],
                                            reshapes[2],
                                            reshapes[3],
                                            output);
        });
    }

    // use 0 - input to represent neg
    void add_neg_op()
    {
        apply_map.emplace("neg", [=](instruction_ref ins) {
            auto s = ins->get_shape();
            std::vector<float> zeros(s.elements(), 0.0f);
            auto l0     = mod->add_literal(literal(s, zeros));
            auto output = insert_allocation(ins, s);
            return mod->replace_instruction(
                ins, make_op("gpu::sub"), l0, ins->inputs().front(), output);
        });
    }

    // add input and output arguments for the if operator; the condition is synced back to the host
    void add_if_op()
    {
        apply_map.emplace("if", [=](instruction_ref ins) {
            std::vector<instruction_ref> inputs = ins->inputs();
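            // the branch condition has to be readable on the host, so copy it off the gpu and
            // synchronize the stream before it is used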
            auto cpu_cond =
                mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), inputs.front());
            auto sync_cond = mod->insert_instruction(ins, make_op("hip::sync_stream"), cpu_cond);
            inputs.front() = sync_cond;

            return mod->replace_instruction(ins, ins->get_operator(), inputs, ins->module_inputs());
        });
    }

    // replace the loop operator with the gpu::loop operator
    void add_loop_op()
    {
        apply_map.emplace("loop", [=](instruction_ref ins) {
            std::vector<instruction_ref> inputs = ins->inputs();
            // copy max_iter and the initial condition from the gpu to the cpu
            auto cpu_max_iter =
                mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), inputs.at(0));
            auto cpu_cond =
                mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), inputs.at(1));
            auto synced_max_iter =
                mod->insert_instruction(ins, make_op("hip::sync_stream"), cpu_max_iter, cpu_cond);
            inputs.at(0)     = synced_max_iter;
            inputs.at(1)     = cpu_cond;
            auto copy_inputs = inputs;
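            // allocate a gpu buffer for each loop-carried value and append it to the inputs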
            std::transform(copy_inputs.begin(),
                           copy_inputs.end(),
                           std::back_inserter(inputs),
                           [&](auto in) { return insert_allocation(ins, in->get_shape()); });

            auto mod_args = ins->module_inputs();
            auto output   = insert_allocation(ins, ins->get_shape());

            const auto* sub_mod = mod_args.front();
            auto cond_out       = insert_allocation(ins, sub_mod->get_output_shapes().front());

            // add cond and mod outputs to the argument list
            inputs.push_back(cond_out);
            inputs.push_back(output);

            return mod->replace_instruction(
                ins, make_op("gpu::loop", ins->get_operator().to_value()), inputs, mod_args);
        });
    }

    void add_nms_op()
    {
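        // nonmaxsuppression is not lowered to a gpu kernel here: the inputs are copied off the
        // gpu, the operator runs on those host copies, and the result is copied back into the
        // gpu output buffer.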
        apply_map.emplace("nonmaxsuppression", [=](instruction_ref ins) {
            auto s      = ins->get_shape();
            auto output = insert_allocation(ins, s);
            std::vector<instruction_ref> cpu_inputs;
            auto inputs = ins->inputs();
            std::transform(
                inputs.begin(), inputs.end(), std::back_inserter(cpu_inputs), [&](auto in) {
                    return mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), in);
                });
            cpu_inputs.front() =
                mod->insert_instruction(ins, make_op("hip::sync_stream"), cpu_inputs);
            auto cpu_out = mod->insert_instruction(ins, ins->get_operator(), cpu_inputs);
            auto gpu_out =
                mod->insert_instruction(ins, make_op("hip::copy_to_gpu"), cpu_out, output);
            return mod->replace_instruction(ins, gpu_out);
        });
    }
};

void lowering::apply(module& m) const { miopen_apply{&m, this}.apply(); }

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx