/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/gpu/fuse_ops.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/register_op.hpp>
#include <migraphx/array.hpp>
#include <migraphx/permutation.hpp>
#include <migraphx/make_op.hpp>
#include <cmath>
#include <set>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_MIOPEN_FUSION)

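// Thin wrapper around a MIOpen fusion plan. The create_* members append fused
// operators (convolution, bias, relu) to the plan, and keep_alive() retains
// the MIOpen descriptors the plan refers to, since the plan only stores raw
// pointers to them.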
struct fusion
{
    using op_t = miopenFusionOpDescriptor_t;
    shared<fusion_plan_descriptor> fp;

    // Used as a temporary hack to keep descriptor references alive
    std::vector<std::shared_ptr<void>> storage;

    template <class T>
    auto keep_alive(T x)
    {
        auto result = share(std::move(x));
        storage.push_back(result);
        return result;
    }

    fusion() = default;

    fusion(const shape& input)
    {
        assert(input.standard());
        auto t = make_tensor(input);
        fp     = make_fusion_plan(t);
        assert(fp);
        keep_alive(std::move(t));
    }

    bool empty() const { return fp == nullptr; }

    op_t operator[](std::size_t i) const
    {
        assert(fp);
        op_t result;
        auto status = miopenFusionPlanGetOp(fp.get(), i, &result);
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Failed retrieving operator at " + std::to_string(i));
        return result;
    }

    auto get() const
    {
        assert(fp);
        return fp.get();
    }

    op_t create_bias(const shape& bias)
    {
        assert(fp);
        op_t result;
        auto b      = shape{bias.type(), {1, bias.lens().at(1), 1, 1}};
        auto t      = keep_alive(make_tensor(b));
        auto status = miopenCreateOpBiasForward(fp.get(), &result, t.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    op_t create_relu()
    {
        assert(fp);
        op_t result;
        auto status = miopenCreateOpActivationForward(fp.get(), &result, miopenActivationRELU);
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    op_t create_conv(const op::convolution& op, const shape& weights)
    {
        assert(fp);
        op_t result;
        auto cd     = keep_alive(make_conv(op));
        auto t      = keep_alive(make_tensor(weights));
        auto status = miopenCreateOpConvForward(fp.get(), &result, cd.get(), t.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    shape get_workspace(context&)
    {
        // assert(fp);
        // TODO: Use zero workspace for now
        std::size_t ws_size = 0;
        // int algo_count = 1;
        // miopenConvFwdAlgorithm_t algo;
        // miopenFusionPlanConvolutionGetAlgo(fp.get(), 1, &algo_count, &algo);
        // miopenFusionPlanGetWorkSpaceSize(ctx.get_stream().get_miopen(), fp.get(), &ws_size,
        // algo);
        return shape{shape::int8_type, {ws_size}};
    }

    bool compile(context& ctx)
    {
        assert(fp);
        return miopenCompileFusionPlan(ctx.get_stream().get_miopen(), fp.get()) ==
               miopenStatusSuccess;
    }

    argument execute(context& ctx,
                     const fused_operator_args& fargs,
                     const argument& x,
                     const argument& y) const
    {
        assert(fp);
        auto x_td   = make_tensor(x.get_shape());
        auto y_td   = make_tensor(y.get_shape());
        auto status = miopenExecuteFusionPlan(ctx.get_stream().get_miopen(),
                                              fp.get(),
                                              x_td.get(),
                                              x.implicit(),
                                              y_td.get(),
                                              y.implicit(),
                                              fargs.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Failed to execute fusion plan");
        return y;
    }
};

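// GPU architectures on which MIOpen fusion plans are attempted; on any other
// device fusable_conv below rejects the match and the ops stay unfused.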
const std::unordered_set<std::string>& get_supported_archs()
{
    static std::unordered_set<std::string> supported_archs{"gfx900", "gfx906", "gfx908", "gfx1030"};
    return supported_archs;
}

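// Matches a 4-d broadcast shape where only the channel dimension has a
// nonzero stride, i.e. a per-channel bias broadcast over an NCHW tensor.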
MIGRAPHX_PRED_MATCHER(bias_shape, instruction_ref ins)
{
    auto&& s = ins->get_shape();
    return s.broadcasted() and s.strides().size() == 4 and s.strides()[0] == 0 and
           s.strides()[1] != 0 and s.strides()[2] == 0 and s.strides()[3] == 0;
}

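// Whitelist of convolutions that MIOpen can reliably fuse: a float
// gpu::convolution on a supported arch, ungrouped, with symmetric spatial
// dimensions and a small set of padding/stride/dilation configurations.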
MIGRAPHX_PRED_MATCHER(fusable_conv, instruction_ref ins)
{
    const auto device_name = trim(split_string(get_device_name(), ':').front());
    if(not contains(get_supported_archs(), device_name))
        return false;
    if(enabled(MIGRAPHX_DISABLE_MIOPEN_FUSION{}))
        return false;
    if(ins->name() != "gpu::convolution")
        return false;
    if(ins->get_shape().type() != shape::float_type)
        return false;
    auto wei = ins->inputs().at(1)->get_shape();
    assert(wei.lens().size() == 4);
    auto conv = any_cast<miopen_convolution>(ins->get_operator());
    if(conv.op.group > 1)
        return false;
    if(wei.lens()[1] > 512 and conv.algo != miopenConvolutionFwdAlgoWinograd)
        return false;

    // Do not fuse non-symmetric input
    auto input_lens = ins->inputs().at(0)->get_shape().lens();
    if(input_lens[2] != input_lens[3] or wei.lens()[2] != wei.lens()[3])
        return false;

    auto op = conv.op;
    // Don't fuse Winograd for non-3x3s since there is no fused Winograd for those configs
    if(conv.algo == miopenConvolutionFwdAlgoWinograd and wei.lens()[2] != 3 and
       wei.lens()[3] != 3 and contains({{1, 1}}, op.stride))
        return false;
    return contains({{0, 0, 0, 0}, {1, 1, 1, 1}, {2, 2, 2, 2}}, op.padding) and
           contains({{0, 0}, {1, 1}}, op.stride) and contains({{1, 1}}, op.dilation);
}

void move_broadcasted_back(std::vector<instruction_ref>& args)
{
    // Ensure the last argument is the broadcasted one
    auto last = std::prev(args.end());
    auto it =
        std::find_if(args.begin(), last, [](auto arg) { return arg->get_shape().broadcasted(); });
    if(it != last)
        std::swap(*it, *std::prev(last));
}

void move_standard_front(std::vector<instruction_ref>& args)
{
    // Ensure the first argument is the standard one
    auto last = std::prev(args.end());
    auto it =
        std::find_if(args.begin(), last, [](auto arg) { return arg->get_shape().standard(); });
    if(it != last)
        std::swap(*it, args.front());
}

auto gpu_name(const std::string& s) { return match::name("gpu::" + s); }

namespace {
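// A fusion op assembled dynamically from a list of component ops (convolution,
// add, relu). compile() lowers the list to a MIOpen fusion plan plus a set of
// invokers that bind per-op arguments at execution time; it returns an empty
// value when MIOpen rejects the plan so callers can leave the graph unchanged.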
struct miopen_fusion
{
    struct fuse_op_data
    {
        operation op;
        float alpha = 1;
        float beta  = 0;
    };
    struct fuse_op : fuse_op_data, reflect_equality<fuse_op>, reflect_stream<fuse_op>
    {
        template <class Self, class F>
        static auto reflect(Self& self, F f)
        {
            return pack(f(self.op, "op"), f(self.alpha, "alpha"), f(self.beta, "beta"));
        }
    };
    std::vector<fuse_op> ops = {};
    fusion f                 = {};
    std::function<void(context&, const fusion&, const std::vector<argument>&)> execute;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.ops, "ops"));
    }

    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }

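    // Build the fusion plan from the input shapes. The trailing input is the
    // output allocation, so it is dropped first; each input-consuming op
    // (conv, add) then takes the next shape in order.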
    value compile(context& ctx, const shape&, std::vector<shape> inputs)
    {
        // Compensate for allocation
        inputs.pop_back();
        std::size_t i = 0;
        f             = fusion(inputs[i]);
        i++;
        std::vector<std::function<void(const fused_operator_args&, const std::vector<argument>&)>>
            invokers;
        for(auto&& fop : ops)
        {
            // Bail out if this op would index past the provided inputs: the
            // conv and add branches below each consume inputs[i], while relu
            // consumes no extra input
            if(i >= inputs.size() and fop.op.name() != "relu")
            {
                f = {};
                return {};
            }
            if(fop.op.name() == "convolution")
            {
                auto* mop = f.create_conv(any_cast<op::convolution>(fop.op), inputs[i]);
                invokers.push_back(
                    [=](const fused_operator_args& fargs, const std::vector<argument>& args) {
                        miopenSetOpArgsConvForward(
                            fargs.get(), mop, &fop.alpha, &fop.beta, args[i].implicit());
                    });
                i++;
            }
            else if(fop.op.name() == "add")
            {
                auto* mop = f.create_bias(inputs[i]);
                invokers.push_back(
                    [=](const fused_operator_args& fargs, const std::vector<argument>& args) {
                        miopenSetOpArgsBiasForward(
                            fargs.get(), mop, &fop.alpha, &fop.beta, args[i].implicit());
                    });
                i++;
            }
            else if(fop.op.name() == "relu")
            {
                auto* mop = f.create_relu();
                invokers.push_back([=](const fused_operator_args& fargs,
                                       const std::vector<argument>&) {
                    miopenSetOpArgsActivForward(fargs.get(), mop, &fop.alpha, &fop.beta, 0, 0, 0);
                });
            }
            else
            {
                f = {};
                return {};
            }
        }
        if(not f.compile(ctx))
        {
            f = {};
            return {};
        }
        execute = [invokers](context& c, const fusion& ff, const std::vector<argument>& args) {
            auto fargs = make_fused_args();
            for(auto&& invoker : invokers)
                invoker(fargs, args);
            ff.execute(c, fargs, args.front(), args.back());
        };
        return {{"workspace", f.get_workspace(ctx).bytes()}};
    }
    void finalize(context& ctx, const shape& output_shape, const std::vector<shape>& inputs)
    {
        if(not f.empty())
            return;
        auto v = compile(ctx, output_shape, inputs);
        if(not v.is_object())
            MIGRAPHX_THROW("Failed to compile fusion plan");
    }
    std::string name() const { return "gpu::miopen_fusion"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        if(ops.empty())
            return {};
        // TODO: Check number of arguments
        return ops.front().op.compute_shape({inputs[0], inputs[1]});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        execute(ctx, f, args);
        return args.back();
    }
};
MIGRAPHX_REGISTER_OP(miopen_fusion)

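// Fixed conv+bias fusion. The arguments are laid out as
// {input, weights, workspace, bias, output}, which is why compute() reads the
// weights from args[1] and the bias from args[3].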
struct miopen_conv_bias
{
    op::convolution op;
    fusion fp         = {};
    fusion::op_t conv = {};
    fusion::op_t bias = {};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return op::convolution::reflect(self.op, f);
    }

    std::string name() const { return "gpu::conv_bias"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        auto fargs  = make_fused_args();
        float alpha = 1;
        float beta  = 0;
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        return fp.execute(ctx, fargs, args[0], args[4]);
    }

    void finalize(context& ctx, const shape&, const std::vector<shape>& inputs)
    {
        fp   = fusion(inputs[0]);
        conv = fp.create_conv(op, inputs[1]);
        bias = fp.create_bias(inputs[3]);
        if(not fp.compile(ctx))
            MIGRAPHX_THROW("Failed to compile fusion plan");
    }

    shape get_workspace(context& ctx) { return fp.get_workspace(ctx); }
    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};
MIGRAPHX_REGISTER_OP(miopen_conv_bias)

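// Same as miopen_conv_bias, with a relu activation appended to the fusion
// plan; the argument layout is identical.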
struct miopen_conv_bias_relu
{
    op::convolution op;
    fusion fp         = {};
    fusion::op_t conv = {};
    fusion::op_t bias = {};
    fusion::op_t relu = {};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return op::convolution::reflect(self.op, f);
    }

    std::string name() const { return "gpu::conv_bias_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        auto fargs  = make_fused_args();
        float alpha = 1;
        float beta  = 0;
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        miopenSetOpArgsActivForward(fargs.get(), relu, &alpha, &beta, 0, 0, 0);
        return fp.execute(ctx, fargs, args[0], args[4]);
    }

    void finalize(context& ctx, const shape&, const std::vector<shape>& inputs)
    {
        fp   = fusion(inputs[0]);
        conv = fp.create_conv(op, inputs[1]);
        bias = fp.create_bias(inputs[3]);
        relu = fp.create_relu();
        // Throw on failure, matching miopen_conv_bias::finalize, rather than
        // silently keeping a plan that would later fail at execution
        if(not fp.compile(ctx))
            MIGRAPHX_THROW("Failed to compile fusion plan");
    }

    shape get_workspace(context& ctx) { return fp.get_workspace(ctx); }
    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};
MIGRAPHX_REGISTER_OP(miopen_conv_bias_relu)

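// Matches gpu::add(conv, bias) with the arguments in either order, where both
// the convolution and the broadcast bias are used only once.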
template <class... Ms>
auto conv_bias(Ms... ms)
{
    return match::name("gpu::add")(
        match::either_arg(0, 1)(bias_shape(match::used_once()).bind("bias"),
                                fusable_conv(match::used_once()).bind("conv")),
        ms...);
}

template <class Op>
void apply_conv_bias(context& ctx, module& m, const match::matcher_result& r)
{
    auto conv_ins    = r.instructions["conv"];
    auto bias_ins    = r.instructions["bias"];
    auto ins         = r.result;
    auto input_ins   = conv_ins->inputs().at(0);
    auto weights_ins = conv_ins->inputs().at(1);
    auto conv_op     = any_cast<miopen_convolution>(conv_ins->get_operator()).op;
    auto alloc_ins   = ins->inputs().back();
    auto old_ws_ins  = conv_ins->inputs().at(2);

    Op cb{conv_op};
    // TODO: Insert ws allocation
    auto ws = cb.get_workspace(ctx);
    (void)ws;
    m.replace_instruction(ins, cb, input_ins, weights_ins, old_ws_ins, bias_ins, alloc_ins);
}

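// Matches a gpu::precompile_op instruction whose wrapped operator has one of
// the given names.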
template <class... Strings>
inline auto precompile_name(Strings... names) // NOLINT
{
    return match::make_basic_pred_matcher([=](instruction_ref ins) {
        if(ins->name() != "gpu::precompile_op")
            return false;
        auto op = from_value<operation>(ins->get_operator().to_value().at("op"));
        return (contains({names...}, op.name()));
    });
}

struct find_conv_bias
{
    context* ctx = nullptr;
    auto matcher() const
    {
        return conv_bias(match::none_of(
            match::output(match::name(std::unordered_set<std::string>{"gpu::relu"}))));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        apply_conv_bias<miopen_conv_bias>(*ctx, m, r);
    }
};

struct find_conv_bias_relu
{
    context* ctx = nullptr;
    auto matcher() const { return match::name("gpu::relu")(match::arg(0)(conv_bias())); }

    void apply(module& m, const match::matcher_result& r) const
    {
        apply_conv_bias<miopen_conv_bias_relu>(*ctx, m, r);
    }
};

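// Fuses a convolution followed by a pointwise module (e.g. bias add + relu)
// into a single miopen_fusion op, translating each instruction of the
// pointwise submodule into a fused op. If MIOpen cannot compile the resulting
// plan, the match is abandoned and the graph is left unchanged.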
struct find_conv_pointwise
{
    context* ctx = nullptr;
    auto matcher() const
    {
        return precompile_name("pointwise")(
            match::nargs(3),
            match::either_arg(0, 1)(bias_shape(match::used_once()).bind("bias"),
                                    fusable_conv(match::used_once()).bind("conv")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto conv_ins    = r.instructions["conv"];
        auto bias_ins    = r.instructions["bias"];
        auto ins         = r.result;
        auto input_ins   = conv_ins->inputs().at(0);
        auto weights_ins = conv_ins->inputs().at(1);
        auto conv_op     = any_cast<miopen_convolution>(conv_ins->get_operator()).op;
        auto alloc_ins   = ins->inputs().back();

        module_ref pm = ins->module_inputs().front();

        miopen_fusion op{};
        op.ops.push_back({{conv_op}});
        for(auto&& i : *pm)
        {
            if(i.name()[0] == '@')
                continue;
            op.ops.push_back({{i.get_operator()}});
        }
        std::vector<instruction_ref> inputs = {input_ins, weights_ins, bias_ins, alloc_ins};
        auto v                              = op.compile(*ctx, ins->get_shape(), to_shapes(inputs));
        if(not v.is_object())
            return;
        m.replace_instruction(ins, op, inputs);
    }
};

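// Folds a pointwise add following a gemm into the gemm itself by absorbing the
// addend and any literal scales into the rocBLAS alpha/beta parameters, i.e.
// C = alpha * A * B + beta * C.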
struct find_gemm_pointwise
{
    auto matcher() const
    {
        return precompile_name("pointwise")(
            match::nargs(3),
            match::either_arg(0, 1)(
                match::any_of(match::standard_shape(), match::is_constant()).bind("c"),
                match::name("gpu::gemm")(match::nargs(3), match::used_once()).bind("gemm")));
    }

    // TODO: Move to matcher.hpp
    static auto match_param(const std::string& name)
    {
        return match::make_basic_pred_matcher([=](auto ins) {
            if(ins->name() != "@param")
                return false;
            auto p = any_cast<builtin::param>(ins->get_operator());
            return p.parameter == name;
        });
    }

    template <class M>
    static auto match_mul_const(M m, const std::string& var)
    {
        return match::name("mul")(match::either_arg(0, 1)(match::name("@literal").bind(var), m))
            .bind(var + "_mul");
    }

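    // Matches the body of the pointwise module: a plain x + y, an add where
    // the gemm operand is scaled by a literal (alpha) or the other operand is
    // scaled by a literal (beta), or a literal scale applied to the whole sum
    // (gamma, which scales both alpha and beta).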
    static auto match_add(const std::string& input, const std::string& output)
    {
        auto param     = match::name("@param");
        auto add       = match::name("add")(match::args(param, param));
        auto inner_mul = match::any_of(match_mul_const(match_param(input), "alpha"),
                                       match_mul_const(match_param(output), "beta"));
        auto mul_add   = match::name("add")(match::either_arg(0, 1)(inner_mul, param));
        auto add_mul   = match_mul_const(add, "gamma");
        return match::name("@return")(match::args(match::any_of(add, mul_add, add_mul)));
    }

    static float get_float(instruction_ref ins) { return ins->get_literal().at<float>(); }

    template <class Gemm>
    static bool update_gemm(Gemm& gemm, module_ref pm, unsigned input)
    {
        auto names = pm->get_parameter_names();
        if(names.size() != 2)
            return false;
        std::sort(names.begin(), names.end());
        unsigned output = input == 0 ? 1 : 0;
        auto mr         = match::match_instruction(
            *pm, std::prev(pm->end()), match_add(names[input], names[output]));
        if(mr.result == pm->end())
            return false;
        if(contains(mr.instructions, "alpha_mul"))
            gemm.alpha *= get_float(mr.instructions["alpha"]);
        else if(contains(mr.instructions, "beta_mul"))
            gemm.beta *= get_float(mr.instructions["beta"]);
        else if(contains(mr.instructions, "gamma_mul"))
        {
            gemm.alpha *= get_float(mr.instructions["gamma"]);
            gemm.beta *= get_float(mr.instructions["gamma"]);
        }
        return true;
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins      = r.result;
        auto gemm_ins = r.instructions["gemm"];
        auto c_ins    = r.instructions["c"];

        auto gemm = any_cast<rocblas_gemm<op::dot>>(gemm_ins->get_operator());

        // Already fused gemm
        if(not float_equal(gemm.beta, 0))
            return;
        gemm.beta = 1;

        if(not update_gemm(
               gemm, ins->module_inputs().front(), ins->inputs().front() == gemm_ins ? 0 : 1))
            return;

        // const-fold input if not standard shape since rocblas can't handle it
        if(not c_ins->get_shape().standard())
        {
            auto c = make_op("contiguous");
            auto l = c.compute(c.compute_shape({c_ins->get_shape()}), {c_ins->eval()});
            c_ins  = m.add_literal(l.get_shape(), l.data());
        }

        auto inputs = gemm_ins->inputs();
        inputs.pop_back();

        inputs.push_back(c_ins);
        inputs.push_back(ins->inputs().back());

        m.replace_instruction(ins, gemm, inputs);
    }
};

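// When a gemm is followed by a transpose that swaps the two dimensions just
// before the innermost one, and then by a gpu::contiguous, allocate the gemm
// output directly in the transposed layout and set trans_batch on the gemm
// instead, so the copy performed by gpu::contiguous is eliminated.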
struct find_contiguous_transpose_gemm
{
    auto matcher() const
    {
        return match::name("gpu::contiguous")(match::arg(0)(
            match::name("transpose")(
                match::arg(0)(match::name("gpu::gemm")(match::used_once()).bind("gemm")))
                .bind("transpose")));
    }

    template <class Vector>
    static bool is_swapped(const Vector& perm, std::size_t i, std::size_t j)
    {
        if(i >= perm.size() or j >= perm.size())
            return false;
        auto perm2 = perm;
        std::iota(perm2.begin(), perm2.end(), 0);
        std::swap(perm2[i], perm2[j]);
        return perm2 == perm;
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins       = r.result;
        auto gemm      = r.instructions["gemm"];
        auto alloc     = gemm->inputs().back();
        auto transpose = r.instructions["transpose"];
        auto perm      = transpose->get_operator().to_value()["permutation"].to_vector<int64_t>();
        auto iperm     = invert_permutation(perm);

        if(perm.size() < 3)
            return;

        if(not is_swapped(perm, perm.size() - 3, perm.size() - 2))
            return;

        auto lens = gemm->get_shape().lens();
        if(lens.size() > 3 and
           not std::all_of(lens.begin(), lens.end() - 3, [](auto i) { return i == 1; }))
            return;

        auto gemmv           = gemm->get_operator().to_value();
        gemmv["trans_batch"] = 1;

        auto s = shape{alloc->get_shape().type(), reorder_dims(alloc->get_shape().lens(), iperm)};
        auto new_alloc = m.insert_instruction(gemm, make_op("allocate", {{"shape", to_value(s)}}));
        auto alloc_transpose =
            m.insert_instruction(gemm, make_op("transpose", {{"permutation", perm}}), new_alloc);

        auto inputs         = gemm->inputs();
        inputs.back()       = alloc_transpose;
        auto new_gemm       = m.insert_instruction(gemm, make_op("gpu::gemm", gemmv), inputs);
        auto gemm_transpose = m.insert_instruction(gemm, transpose->get_operator(), new_gemm);

        m.replace_instruction(ins, gemm_transpose);
    }
};

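// For commutative ops (gpu::add, gpu::mul) with a broadcast argument, reorder
// the inputs so the broadcast one comes last, just before the output
// allocation.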
struct find_commutative_broadcast
{
    auto matcher() const
    {
        return match::name("gpu::add", "gpu::mul")(match::arg(1)(match::broadcast_shape()));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins  = r.result;
        auto args = ins->inputs();
        move_broadcasted_back(args);

        m.replace_instruction(ins, ins->get_operator(), args);
    }
};
} // namespace

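// Rewrites gpu::contiguous into a precompiled op so it can be code-generated
// together with other precompile ops later in the pipeline.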
struct find_contiguous
{
    auto matcher() const { return match::name("gpu::contiguous"); }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins = r.result;

        m.replace_instruction(
            ins,
            make_op("gpu::precompile_op", {{"op", to_value(make_op("contiguous"))}}),
            ins->inputs());
    }
};

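// Folds a gpu::contiguous that immediately follows a pointwise op into the
// pointwise op itself by redirecting the pointwise output into the
// contiguous's allocation, eliminating the extra copy.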
struct find_contiguous_pointwise
{
    auto matcher() const
    {
        return match::name("gpu::contiguous")(match::arg(0)(precompile_name("pointwise")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins    = r.result;
        auto pw     = ins->inputs().front();
        auto alloc  = ins->inputs().back();
        auto args   = pw->inputs();
        args.back() = alloc;

        m.replace_instruction(ins, pw->get_operator(), args, pw->module_inputs());
    }
};

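// Fuses a pointwise module that consumes a layernorm's output into the
// layernorm op itself by appending the pointwise inputs and its submodule.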
struct find_layernorm_pointwise
{
    auto matcher() const
    {
        return precompile_name("pointwise")(match::arg(0)(
            precompile_name("gpu::prelayernorm", "gpu::preadd_layernorm").bind("layernorm")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins       = r.result;
        auto layernorm = r.instructions["layernorm"];
        auto* pm       = ins->module_inputs().front();

        if(not layernorm->module_inputs().empty())
            return;

        auto inputs = layernorm->inputs();
        inputs.pop_back();
        inputs.insert(inputs.end(), ins->inputs().begin() + 1, ins->inputs().end());

        m.replace_instruction(ins, layernorm->get_operator(), inputs, {pm});
    }
};

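// Apply the fusion matchers in stages: contiguous+pointwise folding first,
// then the MIOpen convolution fusions, then the gemm/layernorm/broadcast
// rewrites, and finally the remaining gpu::contiguous conversion, running
// dead-code elimination in between so later matchers see a cleaned-up graph.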
void fuse_ops::apply(module& m) const
{
    match::find_matches(m, find_contiguous_pointwise{});
    run_passes(m, {dead_code_elimination{}});
    match::find_matches(m, find_conv_pointwise{ctx}, find_conv_bias_relu{ctx}, find_conv_bias{ctx});
    run_passes(m, {dead_code_elimination{}});
    match::find_matches(m,
                        find_layernorm_pointwise{},
                        find_gemm_pointwise{},
                        find_contiguous_transpose_gemm{},
                        find_commutative_broadcast{});
    match::find_matches(m, find_contiguous{});
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx