"vscode:/vscode.git/clone" did not exist on "7a0b098596c706d4105c63bd5601db4821ff6ba2"
fuse_ops.cpp 24.2 KB
Newer Older
kahmed10's avatar
kahmed10 committed
1
2
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
Paul's avatar
Paul committed
3
4
5
#include <migraphx/gpu/fuse_ops.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/gpu/miopen.hpp>
kahmed10's avatar
kahmed10 committed
6
#include <migraphx/gpu/clip.hpp>
Paul's avatar
Paul committed
7
#include <migraphx/gpu/convolution.hpp>
8
#include <migraphx/gpu/device_name.hpp>
9
#include <migraphx/gpu/oper.hpp>
kahmed10's avatar
kahmed10 committed
10
11
#include <migraphx/gpu/add.hpp>
#include <migraphx/gpu/mul.hpp>
12
#include <migraphx/gpu/gemm.hpp>
kahmed10's avatar
kahmed10 committed
13
#include <migraphx/gpu/device/layernorm.hpp>
kahmed10's avatar
kahmed10 committed
14
#include <migraphx/gpu/device/gelu.hpp>
Paul's avatar
Paul committed
15
#include <migraphx/gpu/device/mul_add.hpp>
16
17
18
19
20
#include <migraphx/gpu/device/add_clip.hpp>
#include <migraphx/gpu/device/add_relu.hpp>
#include <migraphx/gpu/device/add_sigmoid.hpp>
#include <migraphx/gpu/device/add_tanh.hpp>
#include <migraphx/gpu/device/mul_add_relu.hpp>
Paul's avatar
Paul committed
21
#include <migraphx/gpu/device/add.hpp>
22
23
24
#include <migraphx/match/layernorm.hpp>
#include <migraphx/match/gelu_erf.hpp>
#include <migraphx/match/gelu_tanh.hpp>
Paul's avatar
Paul committed
25
#include <migraphx/instruction.hpp>
26
#include <migraphx/register_op.hpp>
Paul's avatar
Paul committed
27
#include <migraphx/array.hpp>
kahmed10's avatar
kahmed10 committed
28
#include <migraphx/op/clip.hpp>
kahmed10's avatar
kahmed10 committed
29
#include <cmath>
30
#include <set>
Paul's avatar
Paul committed
31
32

namespace migraphx {
Paul's avatar
Paul committed
33
inline namespace MIGRAPHX_INLINE_NS {
Paul's avatar
Paul committed
34
35
namespace gpu {

36
37
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_MIOPEN_FUSION)

Paul's avatar
Paul committed
38
39
40
41
42
43
44
45
// Thin wrapper around a MIOpen fusion plan. It appends operators
// (conv/bias/relu) to the plan, compiles it, and executes it with the
// supplied arguments.
struct fusion
{
    using op_t = miopenFusionOpDescriptor_t;
    shared<fusion_plan_descriptor> fp;

    // Used as a temporary hack to keep descriptor references alive
    std::vector<std::shared_ptr<void>> storage;

    // Type-erase and stash a descriptor so it lives as long as this plan.
    template <class T>
    auto keep_alive(T x)
    {
        auto result = share(std::move(x));
        storage.push_back(result);
        return result;
    }

    fusion() = default;

    // Create a fusion plan anchored on a standard-layout input tensor.
    fusion(const shape& input)
    {
        assert(input.standard());
        auto t = make_tensor(input);
        fp     = make_fusion_plan(t);
        assert(fp);
        keep_alive(std::move(t));
    }

    // Retrieve the i-th operator descriptor previously added to the plan.
    op_t operator[](std::size_t i) const
    {
        assert(fp);
        op_t result;
        auto status = miopenFusionPlanGetOp(fp.get(), i, &result);
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Failed retrieving operator at " + std::to_string(i));
        return result;
    }

    auto get() const
    {
        assert(fp);
        return fp.get();
    }

    // Append a bias op; the bias shape is rewritten to {1, C, 1, 1} as
    // MIOpen expects a per-channel bias tensor.
    op_t create_bias(const shape& bias)
    {
        assert(fp);
        op_t result;
        auto b      = shape{bias.type(), {1, bias.lens().at(1), 1, 1}};
        auto t      = keep_alive(make_tensor(b));
        auto status = miopenCreateOpBiasForward(fp.get(), &result, t.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    // Append a relu activation op to the plan.
    op_t create_relu()
    {
        assert(fp);
        op_t result;
        auto status = miopenCreateOpActivationForward(fp.get(), &result, miopenActivationRELU);
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    // Append a forward-convolution op with the given weights shape.
    op_t create_conv(const op::convolution& op, const shape& weights)
    {
        assert(fp);
        op_t result;
        auto cd     = keep_alive(make_conv(op));
        auto t      = keep_alive(make_tensor(weights));
        auto status = miopenCreateOpConvForward(fp.get(), &result, cd.get(), t.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    // Workspace query; currently always reports an empty workspace (see TODO).
    shape get_workspace(context&)
    {
        // assert(fp);
        // TODO: Use zero workspace for now
        std::size_t ws_size = 0;
        // int algo_count = 1;
        // miopenConvFwdAlgorithm_t algo;
        // miopenFusionPlanConvolutionGetAlgo(fp.get(), 1, &algo_count, &algo);
        // miopenFusionPlanGetWorkSpaceSize(ctx.get_stream().get_miopen(), fp.get(), &ws_size,
        // algo);
        return shape{shape::int8_type, {ws_size}};
    }

    // Compile the plan for the context's stream; must precede execute().
    void compile(context& ctx)
    {
        assert(fp);
        auto status = miopenCompileFusionPlan(ctx.get_stream().get_miopen(), fp.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Compiling fusion plan failed");
    }

    // Run the compiled plan: x is the input argument, y the output buffer,
    // which is also returned.
    argument execute(context& ctx,
                     const fused_operator_args& fargs,
                     const argument& x,
                     const argument& y) const
    {
        assert(fp);
        auto x_td   = make_tensor(x.get_shape());
        auto y_td   = make_tensor(y.get_shape());
        auto status = miopenExecuteFusionPlan(ctx.get_stream().get_miopen(),
                                              fp.get(),
                                              x_td.get(),
                                              x.implicit(),
                                              y_td.get(),
                                              y.implicit(),
                                              fargs.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Failed to execute fusion plan");
        return y;
    }
};

157
158
159
160
161
162
// GPU architectures for which MIOpen fusion is enabled.
const std::unordered_set<std::string>& get_supported_archs()
{
    static const std::unordered_set<std::string> archs = {
        "gfx900", "gfx906", "gfx908", "gfx1030"};
    return archs;
}

Paul's avatar
Paul committed
163
// Matches a 4-d broadcasted shape that varies only along the channel
// dimension (stride pattern {0, c, 0, 0}) -- i.e. a conv-bias operand.
MIGRAPHX_PRED_MATCHER(bias_shape, instruction_ref ins)
{
    const auto& s = ins->get_shape();
    if(not s.broadcasted())
        return false;
    const auto& strides = s.strides();
    return strides.size() == 4 and strides[0] == 0 and strides[1] != 0 and strides[2] == 0 and
           strides[3] == 0;
}

Paul's avatar
Paul committed
170
// Decides whether a gpu::convolution instruction may be folded into a
// MIOpen fusion plan. The checks restrict fusion to configurations for
// which MIOpen is known to ship fused kernels.
MIGRAPHX_PRED_MATCHER(fusable_conv, instruction_ref ins)
{
    // Only fuse on architectures in the allow-list.
    const auto device_name = trim(split_string(get_device_name(), ':').front());
    if(not contains(get_supported_archs(), device_name))
        return false;
    // Environment kill-switch for MIOpen fusion.
    if(enabled(MIGRAPHX_DISABLE_MIOPEN_FUSION{}))
        return false;
    if(ins->name() != "gpu::convolution")
        return false;
    // Fused kernels only exist for fp32.
    if(ins->get_shape().type() != shape::float_type)
        return false;
    auto wei = ins->inputs().at(1)->get_shape();
    assert(wei.lens().size() == 4);
    auto conv = any_cast<miopen_convolution>(ins->get_operator());
    // Grouped convolutions are not fusable.
    if(conv.op.group > 1)
        return false;
    // Wide channel counts are only fusable via the winograd algorithm.
    if(wei.lens()[1] > 512 and conv.algo != miopenConvolutionFwdAlgoWinograd)
        return false;

    // Do not fuse non-symmetric input
    auto input_lens = ins->inputs().at(0)->get_shape().lens();
    if(input_lens[2] != input_lens[3] or wei.lens()[2] != wei.lens()[3])
        return false;

    auto op = conv.op;
    // Dont fuse winograd for non-3x3s since there is no fused windograd for those configs
    if(conv.algo == miopenConvolutionFwdAlgoWinograd and wei.lens()[2] != 3 and
       wei.lens()[3] != 3 and contains({{1, 1}}, op.stride))
        return false;
    // NOTE(review): stride {0, 0} in the accepted set below looks suspicious
    // (a zero convolution stride is not meaningful) -- presumably {2, 2} was
    // intended; confirm against MIOpen's fused-kernel support matrix.
    return contains({{0, 0, 0, 0}, {1, 1, 1, 1}, {2, 2, 2, 2}}, op.padding) and
           contains({{0, 0}, {1, 1}}, op.stride) and contains({{1, 1}}, op.dilation);
}

203
struct hip_triadd : ternary_device<hip_triadd, &device::add>
Paul's avatar
Paul committed
204
205
{
};
206
MIGRAPHX_REGISTER_OP(hip_triadd)
Paul's avatar
Paul committed
207

208
struct hip_triadd_clip : quinary_device<hip_triadd_clip, &device::add_clip>
kahmed10's avatar
kahmed10 committed
209
210
{
};
211
MIGRAPHX_REGISTER_OP(hip_triadd_clip)
kahmed10's avatar
kahmed10 committed
212

213
struct hip_add_clip : quaternary_device<hip_add_clip, &device::add_clip>
kahmed10's avatar
kahmed10 committed
214
215
{
};
216
MIGRAPHX_REGISTER_OP(hip_add_clip)
kahmed10's avatar
kahmed10 committed
217

218
struct hip_triadd_relu : ternary_device<hip_triadd_relu, &device::add_relu>
Paul's avatar
Paul committed
219
220
{
};
221
MIGRAPHX_REGISTER_OP(hip_triadd_relu)
Paul's avatar
Paul committed
222

223
224
225
struct hip_triadd_sigmoid : ternary_device<hip_triadd_sigmoid, &device::add_sigmoid>
{
};
226
MIGRAPHX_REGISTER_OP(hip_triadd_sigmoid)
227
228
229
230

struct hip_triadd_tanh : ternary_device<hip_triadd_tanh, &device::add_tanh>
{
};
231
MIGRAPHX_REGISTER_OP(hip_triadd_tanh)
232
233
234
235

struct hip_add_relu : binary_device<hip_add_relu, &device::add_relu>
{
};
236
MIGRAPHX_REGISTER_OP(hip_add_relu)
237
238
239
240

struct hip_add_sigmoid : binary_device<hip_add_relu, &device::add_sigmoid>
{
};
241
MIGRAPHX_REGISTER_OP(hip_add_sigmoid)
242
243

struct hip_add_tanh : binary_device<hip_add_tanh, &device::add_tanh>
Paul's avatar
Paul committed
244
245
{
};
246
MIGRAPHX_REGISTER_OP(hip_add_tanh)
Paul's avatar
Paul committed
247

kahmed10's avatar
kahmed10 committed
248
249
struct hip_layernorm : unary_device<hip_layernorm, &device::layernorm>
{
250
251
    // Empty finalize to skip dimension reduction
    void finalize(context&, const shape&, const std::vector<shape>&) {}
kahmed10's avatar
kahmed10 committed
252
};
253
MIGRAPHX_REGISTER_OP(hip_layernorm)
kahmed10's avatar
kahmed10 committed
254

Paul Fultz II's avatar
Paul Fultz II committed
255
256
257
258
259
260
261
struct hip_triadd_layernorm : ternary_device<hip_triadd_layernorm, &device::triadd_layernorm>
{
    // Empty finalize to skip dimension reduction
    void finalize(context&, const shape&, const std::vector<shape>&) {}
};
MIGRAPHX_REGISTER_OP(hip_triadd_layernorm)

kahmed10's avatar
kahmed10 committed
262
263
264
struct hip_gelu : unary_device<hip_gelu, &device::gelu>
{
};
265
MIGRAPHX_REGISTER_OP(hip_gelu)
kahmed10's avatar
kahmed10 committed
266
267
268
269

struct hip_add_gelu : binary_device<hip_add_gelu, &device::add_gelu>
{
};
270
MIGRAPHX_REGISTER_OP(hip_add_gelu)
kahmed10's avatar
kahmed10 committed
271
272
273
274

struct hip_gelu_new : unary_device<hip_gelu_new, &device::gelu_new>
{
};
275
MIGRAPHX_REGISTER_OP(hip_gelu_new)
kahmed10's avatar
kahmed10 committed
276
277
278
279

struct hip_add_gelu_new : binary_device<hip_add_gelu_new, &device::add_gelu_new>
{
};
280
MIGRAPHX_REGISTER_OP(hip_add_gelu_new)
kahmed10's avatar
kahmed10 committed
281

282
struct hip_mul_add : ternary_device<hip_mul_add, &device::mul_add>
Paul's avatar
Paul committed
283
284
{
};
285
MIGRAPHX_REGISTER_OP(hip_mul_add)
Paul's avatar
Paul committed
286

287
struct hip_mul_add_relu : ternary_device<hip_mul_add_relu, &device::mul_add_relu>
Paul's avatar
Paul committed
288
289
{
};
290
MIGRAPHX_REGISTER_OP(hip_mul_add_relu)
Paul's avatar
Paul committed
291

Paul's avatar
Paul committed
292
293
294
void move_broadcasted_back(std::vector<instruction_ref>& args)
{
    // Ensure the last arguments is the broadcasted one
Paul's avatar
Paul committed
295
    auto last = std::prev(args.end());
Paul's avatar
Paul committed
296
297
    auto it =
        std::find_if(args.begin(), last, [](auto arg) { return arg->get_shape().broadcasted(); });
Paul's avatar
Paul committed
298
299
    if(it != last)
        std::swap(*it, *std::prev(last));
Paul's avatar
Paul committed
300
301
302
303
304
}

void move_standard_front(std::vector<instruction_ref>& args)
{
    // Ensure the first arguments is the standard one
Paul's avatar
Paul committed
305
    auto last = std::prev(args.end());
Paul's avatar
Paul committed
306
307
    auto it =
        std::find_if(args.begin(), last, [](auto arg) { return arg->get_shape().standard(); });
Paul's avatar
Paul committed
308
    if(it != last)
Paul's avatar
Paul committed
309
310
311
        std::swap(*it, args.front());
}

312
313
auto gpu_name(const std::string& s) { return match::name("gpu::" + s); }

kahmed10's avatar
kahmed10 committed
314
315
struct find_layernorm
{
316
    auto matcher() const { return match::layernorm(&gpu_name); }
kahmed10's avatar
kahmed10 committed
317

318
    void apply(module& p, match::matcher_result r) const
kahmed10's avatar
kahmed10 committed
319
320
321
322
323
    {
        auto ins   = r.result;
        auto x_ins = r.instructions["x"];
        auto args  = ins->inputs();

324
325
326
327
328
329
330
331
332
        // We dont fuse for non-standard layouts
        if(not x_ins->get_shape().standard())
            return;

        auto relements = x_ins->get_shape().lens().back();

        if(relements > 1024 or (relements % 4 != 0 and relements > 256))
            return;

kahmed10's avatar
kahmed10 committed
333
334
335
336
        p.replace_instruction(ins, hip_layernorm{}, x_ins, args.back());
    }
};

Paul Fultz II's avatar
Paul Fultz II committed
337
338
339
340
341
342
343
344
struct find_triadd_layernorm
{
    auto matcher() const
    {
        return match::name("gpu::layernorm")(match::arg(0)(match::name("gpu::triadd")(
            match::used_once(), match::all_of[match::inputs()](match::standard_shape()))));
    }

Shucai Xiao's avatar
Shucai Xiao committed
345
    void apply(module& p, const match::matcher_result& r) const
Paul Fultz II's avatar
Paul Fultz II committed
346
347
348
349
350
351
352
    {
        auto ins    = r.result;
        auto triadd = ins->inputs().front();
        p.replace_instruction(ins, hip_triadd_layernorm{}, triadd->inputs());
    }
};

kahmed10's avatar
kahmed10 committed
353
354
struct find_gelu
{
355
    auto matcher() const { return match::gelu_erf(&gpu_name); }
kahmed10's avatar
kahmed10 committed
356

357
    void apply(module& p, match::matcher_result r) const
kahmed10's avatar
kahmed10 committed
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
    {
        auto ins   = r.result;
        auto x_ins = r.instructions["x"];
        auto args  = ins->inputs();

        p.replace_instruction(ins, hip_gelu{}, x_ins, args.back());
    }
};

struct find_add_gelu
{
    auto matcher() const
    {
        return match::name("gpu::gelu")(match::arg(0)(match::name("gpu::add").bind("add")));
    }

374
    void apply(module& p, match::matcher_result r) const
kahmed10's avatar
kahmed10 committed
375
376
377
378
379
380
381
382
383
384
385
386
387
388
    {
        auto add_ins = r.instructions["add"];
        auto ins     = r.result;
        auto args    = add_ins->inputs();
        move_standard_front(args);
        move_broadcasted_back(args);

        args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_add_gelu{}, args);
    }
};

struct find_gelu_new
{
kahmed10's avatar
kahmed10 committed
389
    bool fast_math = true;
kahmed10's avatar
kahmed10 committed
390

391
    auto matcher() const { return match::gelu_tanh(&gpu_name); }
kahmed10's avatar
kahmed10 committed
392

393
    void apply(module& p, match::matcher_result r) const
kahmed10's avatar
kahmed10 committed
394
395
396
397
398
    {
        auto ins   = r.result;
        auto x_ins = r.instructions["x"];
        auto args  = ins->inputs();

Paul Fultz II's avatar
Paul Fultz II committed
399
        if(fast_math)
kahmed10's avatar
kahmed10 committed
400
            p.replace_instruction(ins, hip_gelu{}, x_ins, args.back());
Paul Fultz II's avatar
Paul Fultz II committed
401
402
        else
            p.replace_instruction(ins, hip_gelu_new{}, x_ins, args.back());
kahmed10's avatar
kahmed10 committed
403
404
405
406
407
408
409
410
411
412
    }
};

struct find_add_gelu_new
{
    auto matcher() const
    {
        return match::name("gpu::gelu_new")(match::arg(0)(match::name("gpu::add").bind("add")));
    }

413
    void apply(module& p, match::matcher_result r) const
kahmed10's avatar
kahmed10 committed
414
415
416
417
418
419
420
421
422
423
424
425
    {
        auto add_ins = r.instructions["add"];
        auto ins     = r.result;
        auto args    = add_ins->inputs();
        move_standard_front(args);
        move_broadcasted_back(args);

        args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_add_gelu_new{}, args);
    }
};

kahmed10's avatar
kahmed10 committed
426
427
428
429
430
431
struct find_add_clip
{
    auto matcher() const
    {
        return match::name(std::unordered_set<std::string>{"gpu::clip", "gpu::clipped_relu"})(
            match::arg(0)(match::any_of(match::name("gpu::add"),
kahmed10's avatar
kahmed10 committed
432
                                        match::name("gpu::triadd"),
kahmed10's avatar
kahmed10 committed
433
434
435
436
                                        match::any_of[match::inputs()](match::standard_shape()))
                              .bind("add")));
    }

437
    void apply(module& p, match::matcher_result r) const
kahmed10's avatar
kahmed10 committed
438
    {
kahmed10's avatar
kahmed10 committed
439
440
441
442
443
444
445
446
447
448
        auto add_ins  = r.instructions["add"];
        auto ins      = r.result;
        auto ins_args = ins->inputs();
        auto add_args = add_ins->inputs();
        move_standard_front(add_args);
        move_broadcasted_back(add_args);

        // Use the allocation from the clip operator
        add_args.pop_back();
        add_args.insert(add_args.end(), std::next(ins_args.begin()), ins_args.end());
kahmed10's avatar
kahmed10 committed
449
        if(add_ins->name() == "gpu::add")
kahmed10's avatar
kahmed10 committed
450
            p.replace_instruction(ins, hip_add_clip{}, add_args);
kahmed10's avatar
kahmed10 committed
451
        else if(add_ins->name() == "gpu::triadd")
kahmed10's avatar
kahmed10 committed
452
            p.replace_instruction(ins, hip_triadd_clip{}, add_args);
kahmed10's avatar
kahmed10 committed
453
454
455
    }
};

456
struct find_add_unary
Paul's avatar
Paul committed
457
{
458
459
460
    std::string op_name;
    operation binary_add_op;
    operation ternary_add_op;
Paul's avatar
Paul committed
461
462
    auto matcher() const
    {
463
        return match::name(op_name)(match::arg(0)(
Paul's avatar
Paul committed
464
            match::used_once(),
Paul's avatar
Paul committed
465
            match::any_of(match::name("gpu::add"),
kahmed10's avatar
kahmed10 committed
466
                          match::name("gpu::triadd"),
Paul's avatar
Paul committed
467
468
469
                          match::any_of(match::name("@literal"),
                                        match::any_of[match::inputs()](match::standard_shape())))
                .bind("add")));
Paul's avatar
Paul committed
470
    }
Paul's avatar
Paul committed
471

472
    void apply(module& p, match::matcher_result r) const
Paul's avatar
Paul committed
473
    {
Paul's avatar
Paul committed
474
        auto add_ins = r.instructions["add"];
Paul's avatar
Paul committed
475
476
        auto ins     = r.result;
        auto args    = add_ins->inputs();
Paul's avatar
Paul committed
477
478
479
        move_standard_front(args);
        move_broadcasted_back(args);

Paul's avatar
Paul committed
480
        // Use the allocation from the relu operator
Paul's avatar
Paul committed
481
        args.back() = ins->inputs().back();
Paul's avatar
Paul committed
482
        if(add_ins->name() == "gpu::add")
483
            p.replace_instruction(ins, binary_add_op, args);
kahmed10's avatar
kahmed10 committed
484
        else if(add_ins->name() == "gpu::triadd")
485
            p.replace_instruction(ins, ternary_add_op, args);
Paul's avatar
Paul committed
486
487
488
    }
};

Paul's avatar
Paul committed
489
struct find_triadd
Paul's avatar
Paul committed
490
491
492
{
    auto matcher() const
    {
Paul's avatar
Paul committed
493
        return match::name("gpu::add")(match::either_arg(0, 1)(
Paul's avatar
Paul committed
494
            match::name("gpu::add")(match::used_once()).bind("add"),
Paul's avatar
Paul committed
495
496
497
            match::any(match::any_of(match::name("@literal"),
                                     match::any_of[match::inputs()](match::standard_shape())))
                .bind("input")));
Paul's avatar
Paul committed
498
499
    }

500
    void apply(module& p, match::matcher_result r) const
Paul's avatar
Paul committed
501
    {
Paul's avatar
Paul committed
502
503
504
505
        auto add_ins   = r.instructions["add"];
        auto input_ins = r.instructions["input"];
        auto ins       = r.result;
        auto args      = add_ins->inputs();
506

Paul's avatar
Paul committed
507
        auto is_broadcasted = [](auto arg) { return arg->get_shape().broadcasted(); };
508
        if(std::count_if(args.begin(), args.end(), is_broadcasted) > 2)
Paul's avatar
Paul committed
509
510
            return;
        args.insert(args.begin(), input_ins);
Paul's avatar
Paul committed
511
512
513
        move_standard_front(args);
        move_broadcasted_back(args);

Paul's avatar
Paul committed
514
515
        args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_triadd{}, args);
Paul's avatar
Paul committed
516
    }
Paul's avatar
Paul committed
517
518
};

Paul's avatar
Paul committed
519
520
521
522
struct find_mul_add
{
    auto matcher() const
    {
Paul's avatar
Paul committed
523
524
        return match::name("gpu::add")(match::either_arg(0, 1)(
            match::name("gpu::mul")(match::used_once()).bind("mul"), match::any().bind("b")));
Paul's avatar
Paul committed
525
526
    }

527
    void apply(module& p, match::matcher_result r) const
Paul's avatar
Paul committed
528
    {
Paul's avatar
Paul committed
529
530
531
532
        auto mul_ins = r.instructions["mul"];
        auto b_ins   = r.instructions["b"];
        auto ins     = r.result;
        auto args    = mul_ins->inputs();
Paul's avatar
Paul committed
533
534
535
536
537
538
539
540
541
542
543
        assert(mul_ins != b_ins);

        move_standard_front(args);
        move_broadcasted_back(args);
        args.insert(std::prev(args.end()), b_ins);

        args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_mul_add{}, args);
    }
};

Paul's avatar
Paul committed
544
545
546
547
struct find_mul_add_relu
{
    auto matcher() const
    {
Paul's avatar
Paul committed
548
        return match::name("gpu::relu")(
kahmed10's avatar
kahmed10 committed
549
            match::arg(0)(match::name("gpu::mul_add")(match::used_once()).bind("mul_add")));
Paul's avatar
Paul committed
550
551
    }

552
    void apply(module& p, match::matcher_result r) const
Paul's avatar
Paul committed
553
554
    {
        auto mul_add_ins = r.instructions["mul_add"];
Paul's avatar
Paul committed
555
556
        auto ins         = r.result;
        auto args        = mul_add_ins->inputs();
Paul's avatar
Paul committed
557
558
559
560
561
562
563

        // Use the allocation from the relu operator
        args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_mul_add_relu{}, args);
    }
};

Paul's avatar
Paul committed
564
565
566
// Fused convolution + bias executed through a MIOpen fusion plan.
// Inputs: {x, weights, workspace, bias, output-allocation}.
struct miopen_conv_bias
{
    op::convolution op;
    fusion f          = {};
    fusion::op_t conv = {};
    fusion::op_t bias = {};

    // Reflect only the wrapped convolution's attributes.
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return op::convolution::reflect(self.op, f);
    }

    std::string name() const { return "gpu::conv_bias"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        // Output shape is that of the plain convolution over {x, weights}.
        return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        auto fargs  = make_fused_args();
        float alpha = 1;
        float beta  = 0;
        // Bind runtime pointers to the plan's conv and bias ops, then run.
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        return f.execute(ctx, fargs, args[0], args[4]);
    }

    // Build and compile the fusion plan once shapes are known.
    void finalize(context& ctx, const shape&, const std::vector<shape>& inputs)
    {
        f    = fusion(inputs[0]);
        conv = f.create_conv(op, inputs[1]);
        bias = f.create_bias(inputs[3]);
        f.compile(ctx);
    }

    shape get_workspace(context& ctx) { return f.get_workspace(ctx); }
    // The output aliases the trailing allocation argument.
    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};
MIGRAPHX_REGISTER_OP(miopen_conv_bias)
Paul's avatar
Paul committed
609

Paul's avatar
Add cbr  
Paul committed
610
611
612
// Fused convolution + bias + relu executed through a MIOpen fusion plan.
// Inputs: {x, weights, workspace, bias, output-allocation}.
struct miopen_conv_bias_relu
{
    op::convolution op;
    fusion f          = {};
    fusion::op_t conv = {};
    fusion::op_t bias = {};
    fusion::op_t relu = {};

    // Reflect only the wrapped convolution's attributes.
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return op::convolution::reflect(self.op, f);
    }

    std::string name() const { return "gpu::conv_bias_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        // Output shape is that of the plain convolution over {x, weights}.
        return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        auto fargs  = make_fused_args();
        float alpha = 1;
        float beta  = 0;
        // Bind runtime pointers to each op in the plan, then run it.
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        miopenSetOpArgsActivForward(fargs.get(), relu, &alpha, &beta, 0, 0, 0);
        return f.execute(ctx, fargs, args[0], args[4]);
    }
    // Build and compile the fusion plan once shapes are known.
    void finalize(context& ctx, const shape&, const std::vector<shape>& inputs)
    {
        f    = fusion(inputs[0]);
        conv = f.create_conv(op, inputs[1]);
        bias = f.create_bias(inputs[3]);
        relu = f.create_relu();
        f.compile(ctx);
    }

    shape get_workspace(context& ctx) { return f.get_workspace(ctx); }
    // The output aliases the trailing allocation argument.
    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};
MIGRAPHX_REGISTER_OP(miopen_conv_bias_relu)
Paul's avatar
Add cbr  
Paul committed
657

Paul's avatar
Paul committed
658
// Matches gpu::add(conv, bias) in either argument order, where the conv is
// fusable and the bias has the conv-bias broadcast shape. Extra matchers
// can be appended via ms...
template <class... Ms>
auto conv_bias(Ms... ms)
{
    return match::name("gpu::add")(
        match::either_arg(0, 1)(bias_shape(match::used_once()).bind("bias"),
                                fusable_conv(match::used_once()).bind("conv")),
        ms...);
}

// Shared rewrite: replace the matched add (or relu) with the fused
// conv+bias operator Op, reusing the conv's workspace and the consumer's
// output allocation.
template <class Op>
void apply_conv_bias(context& ctx, module& p, match::matcher_result r)
{
    auto conv_ins    = r.instructions["conv"];
    auto bias_ins    = r.instructions["bias"];
    auto ins         = r.result;
    auto input_ins   = conv_ins->inputs().at(0);
    auto weights_ins = conv_ins->inputs().at(1);
    auto conv_op     = any_cast<miopen_convolution>(conv_ins->get_operator()).op;
    auto alloc_ins   = ins->inputs().back();
    auto old_ws_ins  = conv_ins->inputs().at(2);

    Op cb{conv_op};
    // TODO: Insert ws allocation
    auto ws = cb.get_workspace(ctx);
    (void)ws;
    p.replace_instruction(ins, cb, input_ins, weights_ins, old_ws_ins, bias_ins, alloc_ins);
}

struct find_conv_bias
{
    context* ctx = nullptr;
    auto matcher() const
    {
        // Skip adds consumed by a relu; find_conv_bias_relu handles those.
        return conv_bias(match::none_of(
            match::output(match::name(std::unordered_set<std::string>{"gpu::relu"}))));
    }

    void apply(module& p, match::matcher_result r) const
    {
        apply_conv_bias<miopen_conv_bias>(*ctx, p, std::move(r));
    }
};

struct find_conv_bias_relu
{
    context* ctx = nullptr;
    auto matcher() const { return match::name("gpu::relu")(match::arg(0)(conv_bias())); }

    void apply(module& p, match::matcher_result r) const
    {
        apply_conv_bias<miopen_conv_bias_relu>(*ctx, p, std::move(r));
    }
};
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730

struct find_gemm_add
{
    auto matcher() const
    {
        return match::name("gpu::add")(
            match::all_of[match::inputs()](match::standard_shape()),
            match::either_arg(0, 1)(match::used_once().bind("c"),
                                    match::name("gpu::gemm")(match::nargs(3)).bind("gemm")));
    }

    void apply(module& p, match::matcher_result r) const
    {
        auto ins      = r.result;
        auto gemm_ins = r.instructions["gemm"];
        auto c_ins    = r.instructions["c"];

        auto gemm = any_cast<rocblas_gemm<op::dot>>(gemm_ins->get_operator());

        // Already fused gemm
731
        if(not float_equal(gemm.beta, 0))
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
            return;

        if(std::any_of(ins->inputs().begin(), ins->inputs().end(), [](auto i) {
               return not i->get_shape().standard();
           }))
            return;

        auto inputs = gemm_ins->inputs();
        inputs.pop_back();

        auto copy_ins = c_ins;

        // Insert copy
        if(ins == p.end() or c_ins->outputs().size() > 1 or c_ins->inputs().empty())
        {
            copy_ins = p.insert_instruction(ins, hip_copy{}, c_ins, ins->inputs().back());
        }
        inputs.push_back(copy_ins);
        inputs.push_back(copy_ins);

752
        gemm.beta = 1;
753
754
755
756
        p.replace_instruction(ins, gemm, inputs);
    }
};

757
758
759
760
761
762
763
struct find_commutative_broadcast
{
    auto matcher() const
    {
        return match::name("gpu::add", "gpu::mul")(match::arg(1)(match::broadcast_shape()));
    }

764
    void apply(module& p, const match::matcher_result& r) const
765
766
767
768
769
770
771
772
773
    {
        auto ins  = r.result;
        auto args = ins->inputs();
        move_broadcasted_back(args);

        p.replace_instruction(ins, ins->get_operator(), args);
    }
};

774
void fuse_ops::apply(module& p) const
Paul's avatar
Paul committed
775
{
kahmed10's avatar
kahmed10 committed
776
    match::find_matches(p, find_gelu{}, find_gelu_new{fast_math});
kahmed10's avatar
kahmed10 committed
777
    run_passes(p, {dead_code_elimination{}});
Paul's avatar
Paul committed
778
    match::find_matches(p, find_triadd{});
779
    match::find_matches(p,
kahmed10's avatar
kahmed10 committed
780
                        find_layernorm{},
781
782
783
784
785
786
787
788
789
790
                        find_conv_bias_relu{ctx},
                        find_conv_bias{ctx},
                        find_add_gelu{},
                        find_add_gelu_new{},
                        find_mul_add{},
                        find_mul_add_relu{},
                        find_add_unary{"gpu::relu", hip_add_relu{}, hip_triadd_relu{}},
                        find_add_unary{"gpu::sigmoid", hip_add_sigmoid{}, hip_triadd_sigmoid{}},
                        find_add_unary{"gpu::tanh", hip_add_tanh{}, hip_triadd_tanh{}},
                        find_add_clip{});
Paul Fultz II's avatar
Paul Fultz II committed
791
    run_passes(p, {dead_code_elimination{}});
792
    match::find_matches(p, find_triadd_layernorm{}, find_gemm_add{}, find_commutative_broadcast{});
Paul's avatar
Paul committed
793
}
Paul's avatar
Paul committed
794
795

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx