fuse_ops.cpp 22.7 KB
Newer Older
kahmed10's avatar
kahmed10 committed
1
2
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
Paul's avatar
Paul committed
3
4
5
#include <migraphx/gpu/fuse_ops.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/gpu/miopen.hpp>
kahmed10's avatar
kahmed10 committed
6
#include <migraphx/gpu/clip.hpp>
Paul's avatar
Paul committed
7
#include <migraphx/gpu/convolution.hpp>
8
#include <migraphx/gpu/oper.hpp>
kahmed10's avatar
kahmed10 committed
9
10
11
#include <migraphx/gpu/add.hpp>
#include <migraphx/gpu/mul.hpp>
#include <migraphx/gpu/device/layernorm.hpp>
kahmed10's avatar
kahmed10 committed
12
#include <migraphx/gpu/device/gelu.hpp>
Paul's avatar
Paul committed
13
#include <migraphx/gpu/device/mul_add.hpp>
14
15
16
17
18
#include <migraphx/gpu/device/add_clip.hpp>
#include <migraphx/gpu/device/add_relu.hpp>
#include <migraphx/gpu/device/add_sigmoid.hpp>
#include <migraphx/gpu/device/add_tanh.hpp>
#include <migraphx/gpu/device/mul_add_relu.hpp>
Paul's avatar
Paul committed
19
#include <migraphx/gpu/device/add.hpp>
Paul's avatar
Paul committed
20
#include <migraphx/instruction.hpp>
Paul's avatar
Paul committed
21
#include <migraphx/array.hpp>
kahmed10's avatar
kahmed10 committed
22
#include <migraphx/op/clip.hpp>
kahmed10's avatar
kahmed10 committed
23
#include <cmath>
Paul's avatar
Paul committed
24
25

namespace migraphx {
Paul's avatar
Paul committed
26
inline namespace MIGRAPHX_INLINE_NS {
Paul's avatar
Paul committed
27
28
namespace gpu {

29
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_MIOPEN_FUSION)
kahmed10's avatar
kahmed10 committed
30
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_FAST_GELU)
31

Paul's avatar
Paul committed
32
33
34
35
36
37
38
39
// Thin wrapper around a MIOpen fusion plan. Operators (conv, bias,
// activation) are appended to the plan, which is then compiled once per
// context and executed with per-call arguments.
struct fusion
{
    using op_t = miopenFusionOpDescriptor_t;
    shared<fusion_plan_descriptor> fp;

    // Used as a temporary hack to keep descriptor references alive
    std::vector<std::shared_ptr<void>> storage;

    // Move x into shared storage so the descriptor outlives the local scope;
    // returns the shared handle for immediate use.
    template <class T>
    auto keep_alive(T x)
    {
        auto result = share(std::move(x));
        storage.push_back(result);
        return result;
    }

    // Build a fusion plan anchored on the input tensor; the input shape must
    // be in standard (packed, non-broadcast) layout.
    fusion(const shape& input)
    // : fp(make_fusion_plan(input))
    {
        assert(input.standard());
        auto t = make_tensor(input);
        fp     = make_fusion_plan(t);
        keep_alive(std::move(t));
    }

    // Fetch the i-th operator descriptor previously added to the plan.
    op_t operator[](std::size_t i) const
    {
        op_t result;
        auto status = miopenFusionPlanGetOp(fp.get(), i, &result);
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Failed retrieving operator at " + std::to_string(i));
        return result;
    }

    auto get() const { return fp.get(); }

    // Append a bias-forward op. The bias descriptor is built as 1xCx1x1,
    // taking C from the second dimension of the given shape.
    op_t create_bias(const shape& bias)
    {
        op_t result;
        auto b      = shape{bias.type(), {1, bias.lens().at(1), 1, 1}};
        auto t      = keep_alive(make_tensor(b));
        auto status = miopenCreateOpBiasForward(fp.get(), &result, t.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    // Append a ReLU activation op.
    op_t create_relu()
    {
        op_t result;
        auto status = miopenCreateOpActivationForward(fp.get(), &result, miopenActivationRELU);
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    // Append a forward-convolution op using the given conv attributes and
    // weights shape.
    op_t create_conv(const op::convolution& op, const shape& weights)
    {
        op_t result;
        auto cd     = keep_alive(make_conv(op));
        auto t      = keep_alive(make_tensor(weights));
        auto status = miopenCreateOpConvForward(fp.get(), &result, cd.get(), t.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    shape get_workspace(context&)
    {
        // TODO: Use zero workspace for now
        std::size_t ws_size = 0;
        // int algo_count = 1;
        // miopenConvFwdAlgorithm_t algo;
        // miopenFusionPlanConvolutionGetAlgo(fp.get(), 1, &algo_count, &algo);
        // miopenFusionPlanGetWorkSpaceSize(ctx.get_stream().get_miopen(), fp.get(), &ws_size,
        // algo);
        return shape{shape::int8_type, {ws_size}};
    }

    // Compile the plan on the context's MIOpen handle. Must succeed before
    // execute() is called.
    void compile(context& ctx)
    {
        auto status = miopenCompileFusionPlan(ctx.get_stream().get_miopen(), fp.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Compiling fusion plan failed");
    }

    // Run the compiled plan: x is the plan's input buffer, y the output
    // buffer, fargs the per-op runtime arguments. Returns y.
    argument execute(context& ctx,
                     const fused_operator_args& fargs,
                     const argument& x,
                     const argument& y) const
    {
        auto x_td   = make_tensor(x.get_shape());
        auto y_td   = make_tensor(y.get_shape());
        auto status = miopenExecuteFusionPlan(ctx.get_stream().get_miopen(),
                                              fp.get(),
                                              x_td.get(),
                                              x.implicit(),
                                              y_td.get(),
                                              y.implicit(),
                                              fargs.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Failed to execute fusion plan");
        return y;
    }
};

Paul's avatar
Paul committed
138
// Predicate: the instruction's shape looks like a conv bias broadcast along
// the channel axis — 4-d, broadcasted, with only stride[1] non-zero.
MIGRAPHX_PRED_MATCHER(bias_shape, instruction_ref ins)
{
    const auto& s = ins->get_shape();
    if(not s.broadcasted())
        return false;
    const auto& strides = s.strides();
    if(strides.size() != 4)
        return false;
    return strides[0] == 0 and strides[1] != 0 and strides[2] == 0 and strides[3] == 0;
}

Paul's avatar
Paul committed
145
// Predicate: true when a gpu::convolution instruction is eligible for MIOpen
// fusion. Applies conservative restrictions on type, group count, algorithm
// and geometry, and can be disabled wholesale via MIGRAPHX_DISABLE_MIOPEN_FUSION.
MIGRAPHX_PRED_MATCHER(fusable_conv, instruction_ref ins)
{
    if(enabled(MIGRAPHX_DISABLE_MIOPEN_FUSION{}))
        return false;
    if(ins->name() != "gpu::convolution")
        return false;
    // Only float convolutions are fused
    if(ins->get_shape().type() != shape::float_type)
        return false;
    auto wei = ins->inputs().at(1)->get_shape();
    assert(wei.lens().size() == 4);
    auto conv = any_cast<miopen_convolution>(ins->get_operator());
    // Grouped convolutions are not fused
    if(conv.op.group > 1)
        return false;
    // NOTE(review): wide-channel weights (>512 input channels) are only fused
    // when the winograd algorithm was selected — presumably a perf/support
    // limit of the fused kernels; confirm against MIOpen.
    if(wei.lens()[1] > 512 and conv.algo != miopenConvolutionFwdAlgoWinograd)
        return false;

    // Do not fuse non-symmetric input
    auto input_lens = ins->inputs().at(0)->get_shape().lens();
    if(input_lens[2] != input_lens[3] or wei.lens()[2] != wei.lens()[3])
        return false;

    auto op = conv.op;
    // Dont fuse winograd for non-3x3s since there is no fused winograd for those configs
    if(conv.algo == miopenConvolutionFwdAlgoWinograd and wei.lens()[2] != 3 and
       wei.lens()[3] != 3 and contains({{1, 1}}, op.stride))
        return false;
    // Remaining configs: small symmetric padding/stride and unit dilation only
    return contains({{0, 0}, {1, 1}, {2, 2}}, op.padding) and
           contains({{0, 0}, {1, 1}}, op.stride) and contains({{1, 1}}, op.dilation);
}

175
// Fused elementwise ops. The N-ary *_device bases (see gpu/oper.hpp) supply
// the operation boilerplate around the given device kernel; by convention in
// this file the final argument of each op is its output allocation.

// Three-input add
struct hip_triadd : ternary_device<hip_triadd, &device::add>
{
};

// Three-input add followed by clip (extra args carry the clip bounds —
// presumably min/max; confirm against device::add_clip)
struct hip_triadd_clip : quinary_device<hip_triadd_clip, &device::add_clip>
{
};

// Two-input add followed by clip
struct hip_add_clip : quaternary_device<hip_add_clip, &device::add_clip>
{
};

// relu(three-input add)
struct hip_triadd_relu : ternary_device<hip_triadd_relu, &device::add_relu>
{
};

191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
// sigmoid(three-input add)
struct hip_triadd_sigmoid : ternary_device<hip_triadd_sigmoid, &device::add_sigmoid>
{
};

// tanh(three-input add)
struct hip_triadd_tanh : ternary_device<hip_triadd_tanh, &device::add_tanh>
{
};

// relu(two-input add)
struct hip_add_relu : binary_device<hip_add_relu, &device::add_relu>
{
};

// sigmoid(two-input add)
// Fix: the first template argument is the CRTP/derived-type tag and must be
// this struct itself. It previously passed hip_add_relu, so the base
// generated this op with hip_add_relu's identity while dispatching to
// device::add_sigmoid — inconsistent with every sibling definition here.
struct hip_add_sigmoid : binary_device<hip_add_sigmoid, &device::add_sigmoid>
{
};

// tanh(two-input add)
struct hip_add_tanh : binary_device<hip_add_tanh, &device::add_tanh>
{
};

kahmed10's avatar
kahmed10 committed
211
212
213
214
// Fused layernorm kernel (device::layernorm)
struct hip_layernorm : unary_device<hip_layernorm, &device::layernorm>
{
};

// Gelu kernel (device::gelu — the erf-based form; see find_gelu)
struct hip_gelu : unary_device<hip_gelu, &device::gelu>
{
};

// gelu applied to a two-input add
struct hip_add_gelu : binary_device<hip_add_gelu, &device::add_gelu>
{
};

// "new" gelu kernel (device::gelu_new — the tanh-approximation form; see
// find_gelu_new)
struct hip_gelu_new : unary_device<hip_gelu_new, &device::gelu_new>
{
};

// gelu_new applied to a two-input add
struct hip_add_gelu_new : binary_device<hip_add_gelu_new, &device::add_gelu_new>
{
};

231
// Fused multiply-add: a * b + c
struct hip_mul_add : ternary_device<hip_mul_add, &device::mul_add>
{
};

// relu(a * b + c)
struct hip_mul_add_relu : ternary_device<hip_mul_add_relu, &device::mul_add_relu>
{
};

Paul's avatar
Paul committed
239
240
241
void move_broadcasted_back(std::vector<instruction_ref>& args)
{
    // Ensure the last arguments is the broadcasted one
Paul's avatar
Paul committed
242
    auto last = std::prev(args.end());
Paul's avatar
Paul committed
243
244
    auto it =
        std::find_if(args.begin(), last, [](auto arg) { return arg->get_shape().broadcasted(); });
Paul's avatar
Paul committed
245
246
    if(it != last)
        std::swap(*it, *std::prev(last));
Paul's avatar
Paul committed
247
248
249
250
251
}

void move_standard_front(std::vector<instruction_ref>& args)
{
    // Ensure the first arguments is the standard one
Paul's avatar
Paul committed
252
    auto last = std::prev(args.end());
Paul's avatar
Paul committed
253
254
    auto it =
        std::find_if(args.begin(), last, [](auto arg) { return arg->get_shape().standard(); });
Paul's avatar
Paul committed
255
    if(it != last)
Paul's avatar
Paul committed
256
257
258
        std::swap(*it, args.front());
}

kahmed10's avatar
kahmed10 committed
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
// Matches the ONNX-style layernorm expression
//   (x - mean(x)) / sqrt(mean((x - mean(x))^2) + 1e-12)
// built from gpu::sub/pow/reduce_mean/sqrt/add/div, and replaces it with the
// fused hip_layernorm kernel.
struct find_layernorm
{
    // multibroadcast whose source matches xs...
    template <class... Ts>
    static auto multibroadcast_op(Ts... xs)
    {
        return match::name("multibroadcast")(match::arg(0)(xs...));
    }

    // x - broadcast(reduce_mean(...)); binds "x"
    static auto x_minus_mean()
    {
        return match::name("gpu::sub")(
            match::arg(0)(match::any().bind("x")),
            match::arg(1)(multibroadcast_op(match::name("gpu::reduce_mean"))));
    }

    // mean((x - mean)^2), with the exponent pinned to 2.0
    static auto variance()
    {
        return match::name("gpu::reduce_mean")(match::arg(0)(
            match::name("gpu::pow")(match::arg(0)(x_minus_mean()),
                                    match::arg(1)(multibroadcast_op(match::has_value(2.0f))))));
    }

    // Full pattern; epsilon is pinned to 1e-12
    static auto layernorm_onnx()
    {
        return match::name("gpu::div")(
            match::arg(0)(x_minus_mean()),

            match::arg(1)(multibroadcast_op(
                match::name("gpu::sqrt")(match::arg(0)(match::name("gpu::add")(match::either_arg(
                    0, 1)(variance(), multibroadcast_op(match::has_value(1e-12f)))))))));
    }

    auto matcher() const { return layernorm_onnx(); }

    void apply(program& p, match::matcher_result r) const
    {
        auto ins   = r.result;
        auto x_ins = r.instructions["x"];
        auto args  = ins->inputs();

        // args.back() is the matched div's output allocation, reused here
        p.replace_instruction(ins, hip_layernorm{}, x_ins, args.back());
    }
};

kahmed10's avatar
kahmed10 committed
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
// Matches the exact (erf-based) gelu, x * 0.5 * (1 + erf(x / sqrt(2))), in
// any argument order, and replaces it with the fused hip_gelu kernel.
struct find_gelu
{

    // erf(x * 1/sqrt(2)); binds "x" to the non-constant factor
    static auto erf_fn()
    {
        return match::name("gpu::erf")(
            match::used_once(),
            match::arg(0)(match::used_once(),
                          match::name("gpu::mul")(match::either_arg(0, 1)(
                              match::none_of(match::has_value(M_SQRT1_2)).bind("x"),
                              match::has_value(M_SQRT1_2)))));
    }

    // (... * 0.5) * (erf_fn() + 1)
    auto matcher() const
    {
        return match::name("gpu::mul")(match::either_arg(0, 1)(
            match::name("gpu::mul")(match::any_arg(0, 1)(match::args(match::has_value(0.5f)))),
            match::name("gpu::add")(
                match::used_once(),
                match::either_arg(0, 1)(erf_fn(), match::args(match::has_value(1.0f))))));
    }

    void apply(program& p, match::matcher_result r) const
    {
        auto ins   = r.result;
        auto x_ins = r.instructions["x"];
        auto args  = ins->inputs();

        // args.back() is the matched mul's output allocation, reused here
        p.replace_instruction(ins, hip_gelu{}, x_ins, args.back());
    }
};

// Fold a preceding gpu::add into the gelu, producing the fused add+gelu op.
struct find_add_gelu
{
    auto matcher() const
    {
        return match::name("gpu::gelu")(match::arg(0)(match::name("gpu::add").bind("add")));
    }

    void apply(program& p, match::matcher_result r) const
    {
        auto ins        = r.result;
        auto add_ins    = r.instructions["add"];
        auto fused_args = add_ins->inputs();
        move_standard_front(fused_args);
        move_broadcasted_back(fused_args);

        // Write into the gelu's output allocation instead of the add's
        fused_args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_add_gelu{}, fused_args);
    }
};

// Matches the tanh-approximation gelu:
//   0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
// By default the approximation is replaced with the hip_gelu kernel; setting
// MIGRAPHX_DISABLE_FAST_GELU keeps it as hip_gelu_new instead.
struct find_gelu_new
{

    // x^3 (exponent pinned to 3.0)
    static auto pow_fn()
    {
        return match::name("gpu::pow")(match::used_once(),
                                       match::arg(1)(match::args(match::has_value(3.0f))));
    }

    // tanh(sqrt(2/pi) * (... + 0.044715 * x^3))
    static auto tanh_fn()
    {
        return match::name("gpu::tanh")(
            match::used_once(),
            match::arg(0)(match::name("gpu::mul")(match::either_arg(0, 1)(
                match::args(match::has_value(sqrt(M_2_PI))),
                match::name("gpu::add")(
                    match::any_arg(0, 1)(match::name("gpu::mul")(match::either_arg(0, 1)(
                        match::args(match::has_value(0.044715f)), pow_fn()))))))));
    }

    // x * (... + 0.5 * tanh_fn()); binds "x"
    auto matcher() const
    {
        return match::name("gpu::mul")(
            match::used_once(),
            match::either_arg(0, 1)(
                match::any().bind("x"),
                match::name("gpu::add")(match::any_arg(0, 1)(match::name("gpu::mul")(
                    match::either_arg(0, 1)(match::args(match::has_value(0.5f)), tanh_fn()))))));
    }

    void apply(program& p, match::matcher_result r) const
    {
        auto ins   = r.result;
        auto x_ins = r.instructions["x"];
        auto args  = ins->inputs();

        if(enabled(MIGRAPHX_DISABLE_FAST_GELU{}))
            p.replace_instruction(ins, hip_gelu_new{}, x_ins, args.back());
        else
            p.replace_instruction(ins, hip_gelu{}, x_ins, args.back());
    }
};

// Fold a preceding gpu::add into the approximate gelu, producing the fused
// add+gelu_new op.
struct find_add_gelu_new
{
    auto matcher() const
    {
        return match::name("gpu::gelu_new")(match::arg(0)(match::name("gpu::add").bind("add")));
    }

    void apply(program& p, match::matcher_result r) const
    {
        auto ins        = r.result;
        auto add_ins    = r.instructions["add"];
        auto fused_args = add_ins->inputs();
        move_standard_front(fused_args);
        move_broadcasted_back(fused_args);

        // Write into the gelu_new's output allocation instead of the add's
        fused_args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_add_gelu_new{}, fused_args);
    }
};

kahmed10's avatar
kahmed10 committed
418
419
420
421
422
423
424
425
426
427
428
429
430
// Fuse a gpu::clip (or gpu::clipped_relu) that consumes an add/triadd into a
// single fused add+clip kernel.
struct find_add_clip
{
    auto matcher() const
    {
        // NOTE(review): the third any_of alternative ("any instruction with a
        // standard-shaped input") also binds non-add producers to "add";
        // apply() then falls through both branches and leaves the program
        // unchanged — confirm whether all_of was intended here.
        return match::name(std::unordered_set<std::string>{"gpu::clip", "gpu::clipped_relu"})(
            match::arg(0)(match::any_of(match::name("gpu::add"),
                                        match::name("hip::triadd"),
                                        match::any_of[match::inputs()](match::standard_shape()))
                              .bind("add")));
    }

    void apply(program& p, match::matcher_result r) const
    {
        auto add_ins  = r.instructions["add"];
        auto ins      = r.result;
        auto ins_args = ins->inputs();
        auto add_args = add_ins->inputs();
        move_standard_front(add_args);
        move_broadcasted_back(add_args);

        // Use the allocation from the clip operator
        add_args.pop_back();
        // Append the clip's trailing inputs (everything after its first
        // argument, including its output allocation)
        add_args.insert(add_args.end(), std::next(ins_args.begin()), ins_args.end());
        if(add_ins->name() == "gpu::add")
            p.replace_instruction(ins, hip_add_clip{}, add_args);
        else if(add_ins->name() == "hip::triadd")
            p.replace_instruction(ins, hip_triadd_clip{}, add_args);
    }
};

448
// Generic fusion of a unary op (relu/sigmoid/tanh, chosen by op_name)
// consuming an add or triadd: replaces the pair with the corresponding fused
// binary/ternary add+unary operation.
struct find_add_unary
{
    std::string op_name;        // name of the unary gpu op to match
    operation binary_add_op;    // replacement when the producer is gpu::add
    operation ternary_add_op;   // replacement when the producer is hip::triadd

    auto matcher() const
    {
        // NOTE(review): as in find_add_clip, the literal/standard-shape
        // alternatives can bind non-add producers to "add", in which case
        // apply() performs no replacement.
        return match::name(op_name)(match::arg(0)(
            match::used_once(),
            match::any_of(match::name("gpu::add"),
                          match::name("hip::triadd"),
                          match::any_of(match::name("@literal"),
                                        match::any_of[match::inputs()](match::standard_shape())))
                .bind("add")));
    }

    void apply(program& p, match::matcher_result r) const
    {
        auto add_ins = r.instructions["add"];
        auto ins     = r.result;
        auto args    = add_ins->inputs();
        move_standard_front(args);
        move_broadcasted_back(args);

        // Use the allocation from the relu operator
        args.back() = ins->inputs().back();
        if(add_ins->name() == "gpu::add")
            p.replace_instruction(ins, binary_add_op, args);
        else if(add_ins->name() == "hip::triadd")
            p.replace_instruction(ins, ternary_add_op, args);
    }
};

Paul's avatar
Paul committed
481
// Collapse two chained gpu::add instructions into a single hip::triadd.
struct find_triadd
{
    auto matcher() const
    {
        return match::name("gpu::add")(match::either_arg(0, 1)(
            match::name("gpu::add")(match::used_once()).bind("add"),
            match::any(match::any_of(match::name("@literal"),
                                     match::any_of[match::inputs()](match::standard_shape())))
                .bind("input")));
    }

    void apply(program& p, match::matcher_result r) const
    {
        auto add_ins   = r.instructions["add"];
        auto input_ins = r.instructions["input"];
        auto ins       = r.result;
        auto args      = add_ins->inputs();
        assert(add_ins != input_ins);

        // triadd only handles at most one broadcasted input; bail otherwise
        auto is_broadcasted = [](auto arg) { return arg->get_shape().broadcasted(); };
        if(std::count_if(args.begin(), args.end(), is_broadcasted) > 1)
            return;
        args.insert(args.begin(), input_ins);
        move_standard_front(args);
        move_broadcasted_back(args);

        // Use the outer add's output allocation for the fused op
        args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_triadd{}, args);
    }
};

Paul's avatar
Paul committed
512
513
514
515
struct find_mul_add
{
    auto matcher() const
    {
Paul's avatar
Paul committed
516
517
        return match::name("gpu::add")(match::either_arg(0, 1)(
            match::name("gpu::mul")(match::used_once()).bind("mul"), match::any().bind("b")));
Paul's avatar
Paul committed
518
519
520
521
    }

    void apply(program& p, match::matcher_result r) const
    {
Paul's avatar
Paul committed
522
523
524
525
        auto mul_ins = r.instructions["mul"];
        auto b_ins   = r.instructions["b"];
        auto ins     = r.result;
        auto args    = mul_ins->inputs();
Paul's avatar
Paul committed
526
527
528
529
530
531
532
533
534
535
536
        assert(mul_ins != b_ins);

        move_standard_front(args);
        move_broadcasted_back(args);
        args.insert(std::prev(args.end()), b_ins);

        args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_mul_add{}, args);
    }
};

Paul's avatar
Paul committed
537
538
539
540
// Fuse gpu::relu following hip::mul_add into hip::mul_add_relu.
struct find_mul_add_relu
{
    auto matcher() const
    {
        return match::name("gpu::relu")(
            match::arg(0)(match::name("hip::mul_add")(match::used_once()).bind("mul_add")));
    }

    void apply(program& p, match::matcher_result r) const
    {
        auto ins         = r.result;
        auto mul_add_ins = r.instructions["mul_add"];
        auto fused_args  = mul_add_ins->inputs();

        // Use the allocation from the relu operator
        fused_args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_mul_add_relu{}, fused_args);
    }
};

Paul's avatar
Paul committed
557
558
559
560
561
562
563
// Fused convolution+bias operation backed by a MIOpen fusion plan.
// Argument layout (see apply_conv_bias): {input, weights, workspace, bias,
// output allocation}; the output aliases the last argument.
struct miopen_conv_bias
{
    op::convolution op;
    fusion f;
    fusion::op_t conv;
    fusion::op_t bias;

    // Reflect only the convolution attributes (the fusion plan is rebuilt in
    // the constructor, not serialized)
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return op::convolution::reflect(self.op, f);
    }

    miopen_conv_bias(op::convolution c, const shape& input, const shape& weights, const shape& b)
        : op(std::move(c)), f(input)
    {
        conv = f.create_conv(op, weights);
        bias = f.create_bias(b);
    }

    std::string name() const { return "gpu::conv_bias"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        // Output shape is determined by the conv alone (input, weights)
        return op.compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        auto fargs  = make_fused_args();
        float alpha = 1;
        float beta  = 0;
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        // args[0] = input, args[4] = output allocation
        return f.execute(ctx, fargs, args[0], args[4]);
    }

    // Compile the fusion plan once the context is known
    void finalize(context& ctx, const shape&, const std::vector<shape>&) { f.compile(ctx); }
    shape get_workspace(context& ctx) { return f.get_workspace(ctx); }
    // The output buffer is the last input (the allocation)
    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};

Paul's avatar
Add cbr  
Paul committed
602
603
604
605
606
607
// Fused convolution+bias+relu operation backed by a MIOpen fusion plan.
// Same argument layout as miopen_conv_bias: {input, weights, workspace,
// bias, output allocation}.
struct miopen_conv_bias_relu
{
    op::convolution op;
    fusion f;
    fusion::op_t conv;
    fusion::op_t bias;
    fusion::op_t relu;

    // Reflect only the convolution attributes (the fusion plan is rebuilt in
    // the constructor, not serialized)
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return op::convolution::reflect(self.op, f);
    }

    miopen_conv_bias_relu(op::convolution c,
                          const shape& input,
                          const shape& weights,
                          const shape& b)
        : op(std::move(c)), f(input)
    {
        conv = f.create_conv(op, weights);
        bias = f.create_bias(b);
        relu = f.create_relu();
    }

    std::string name() const { return "gpu::conv_bias_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        // Output shape is determined by the conv alone (input, weights)
        return op.compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        auto fargs  = make_fused_args();
        float alpha = 1;
        float beta  = 0;
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        miopenSetOpArgsActivForward(fargs.get(), relu, &alpha, &beta, 0, 0, 0);
        // args[0] = input, args[4] = output allocation
        return f.execute(ctx, fargs, args[0], args[4]);
    }
    // Compile the fusion plan once the context is known
    void finalize(context& ctx, const shape&, const std::vector<shape>&) { f.compile(ctx); }
    shape get_workspace(context& ctx) { return f.get_workspace(ctx); }
    // The output buffer is the last input (the allocation)
    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};

Paul's avatar
Paul committed
652
// Matcher for gpu::add(conv, bias) in either argument order, where the bias
// has the 1xCx1x1 broadcast layout (bias_shape) and the convolution passes
// the fusable_conv checks. Extra matchers ms... apply to the add itself.
template <class... Ms>
auto conv_bias(Ms... ms)
{
    return match::name("gpu::add")(
        match::either_arg(0, 1)(bias_shape(match::used_once()).bind("bias"),
                                fusable_conv(match::used_once()).bind("conv")),
        ms...);
}

Paul's avatar
Paul committed
661
// Shared rewrite for conv+bias(+relu): constructs the fused MIOpen op Op and
// replaces the matched instruction with it, reusing the conv's workspace
// allocation and the matched instruction's output allocation.
// Resulting argument order: {input, weights, workspace, bias, output}.
template <class Op>
void apply_conv_bias(context& ctx, program& p, match::matcher_result r)
{
    auto conv_ins    = r.instructions["conv"];
    auto bias_ins    = r.instructions["bias"];
    auto ins         = r.result;
    auto input_ins   = conv_ins->inputs().at(0);
    auto weights_ins = conv_ins->inputs().at(1);
    auto conv_op     = any_cast<miopen_convolution>(conv_ins->get_operator()).op;
    auto alloc_ins   = ins->inputs().back();
    auto old_ws_ins  = conv_ins->inputs().at(2);

    Op cb{conv_op, input_ins->get_shape(), weights_ins->get_shape(), bias_ins->get_shape()};
    // TODO: Insert ws allocation
    auto ws = cb.get_workspace(ctx);
    (void)ws;
    p.replace_instruction(ins, cb, input_ins, weights_ins, old_ws_ins, bias_ins, alloc_ins);
}

Paul's avatar
Paul committed
680
// Rewrite conv+bias into the fused gpu::conv_bias op, but only when the add
// does not feed a relu — that case is left for find_conv_bias_relu.
struct find_conv_bias
{
    context* ctx = nullptr;
    auto matcher() const
    {
        return conv_bias(match::none_of(
            match::output(match::name(std::unordered_set<std::string>{"gpu::relu"}))));
    }

    void apply(program& p, match::matcher_result r) const
    {
        apply_conv_bias<miopen_conv_bias>(*ctx, p, std::move(r));
    }
};

Paul's avatar
Paul committed
695
// Rewrite relu(conv + bias) into the fused gpu::conv_bias_relu op.
struct find_conv_bias_relu
{
    context* ctx = nullptr;
    auto matcher() const { return match::name("gpu::relu")(match::arg(0)(conv_bias())); }

    void apply(program& p, match::matcher_result r) const
    {
        apply_conv_bias<miopen_conv_bias_relu>(*ctx, p, std::move(r));
    }
};

Paul's avatar
Paul committed
706
707
// Top-level fusion pass. Ordering matters: gelu patterns are rewritten first
// (then dead code removed so leftover subexpressions don't block later
// matches), triadd fusion runs next so the add-unary/clip matchers can see
// hip::triadd producers, and the remaining fusions run in one batch.
void fuse_ops::apply(program& p) const
{
    match::find_matches(p, find_gelu{}, find_gelu_new{});
    run_passes(p, {dead_code_elimination{}});
    match::find_matches(p, find_triadd{});
    match::find_matches(p,
                        find_layernorm{},
                        find_conv_bias_relu{ctx},
                        find_conv_bias{ctx},
                        find_add_gelu{},
                        find_add_gelu_new{},
                        find_mul_add{},
                        find_mul_add_relu{},
                        find_add_unary{"gpu::relu", hip_add_relu{}, hip_triadd_relu{}},
                        find_add_unary{"gpu::sigmoid", hip_add_sigmoid{}, hip_triadd_sigmoid{}},
                        find_add_unary{"gpu::tanh", hip_add_tanh{}, hip_triadd_tanh{}},
                        find_add_clip{});
    // clang-format on
}
Paul's avatar
Paul committed
725
726

} // namespace gpu
Paul's avatar
Paul committed
727
} // namespace MIGRAPHX_INLINE_NS
Paul's avatar
Paul committed
728
} // namespace migraphx